summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/dfg
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2015-05-20 09:56:07 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2015-05-20 09:56:07 +0000
commit41386e9cb918eed93b3f13648cbef387e371e451 (patch)
treea97f9d7bd1d9d091833286085f72da9d83fd0606 /Source/JavaScriptCore/dfg
parente15dd966d523731101f70ccf768bba12435a0208 (diff)
downloadWebKitGtk-tarball-41386e9cb918eed93b3f13648cbef387e371e451.tar.gz
webkitgtk-2.4.9webkitgtk-2.4.9
Diffstat (limited to 'Source/JavaScriptCore/dfg')
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp6
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractHeap.h104
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h68
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h1775
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractValue.cpp284
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractValue.h276
-rw-r--r--Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp113
-rw-r--r--Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.h76
-rw-r--r--Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.cpp76
-rw-r--r--Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h57
-rw-r--r--Source/JavaScriptCore/dfg/DFGAdjacencyList.h60
-rw-r--r--Source/JavaScriptCore/dfg/DFGAllocator.h26
-rw-r--r--Source/JavaScriptCore/dfg/DFGAnalysis.h12
-rw-r--r--Source/JavaScriptCore/dfg/DFGArgumentPosition.h14
-rw-r--r--Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp625
-rw-r--r--Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp798
-rw-r--r--Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h (renamed from Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.h)18
-rw-r--r--Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp98
-rw-r--r--Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h47
-rw-r--r--Source/JavaScriptCore/dfg/DFGArithMode.cpp1
-rw-r--r--Source/JavaScriptCore/dfg/DFGArithMode.h43
-rw-r--r--Source/JavaScriptCore/dfg/DFGArrayMode.cpp252
-rw-r--r--Source/JavaScriptCore/dfg/DFGArrayMode.h62
-rw-r--r--Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h20
-rw-r--r--Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp9
-rw-r--r--Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h10
-rw-r--r--Source/JavaScriptCore/dfg/DFGAvailability.cpp1
-rw-r--r--Source/JavaScriptCore/dfg/DFGAvailability.h27
-rw-r--r--Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp109
-rw-r--r--Source/JavaScriptCore/dfg/DFGAvailabilityMap.h91
-rw-r--r--Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp110
-rw-r--r--Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGBasicBlock.cpp51
-rw-r--r--Source/JavaScriptCore/dfg/DFGBasicBlock.h156
-rw-r--r--Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h44
-rw-r--r--Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp196
-rw-r--r--Source/JavaScriptCore/dfg/DFGBinarySwitch.h142
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp13
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h14
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockMap.h111
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockMapInlines.h46
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockSet.cpp43
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockSet.h151
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockSetInlines.h46
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockWorklist.cpp86
-rw-r--r--Source/JavaScriptCore/dfg/DFGBlockWorklist.h184
-rw-r--r--Source/JavaScriptCore/dfg/DFGBranchDirection.h18
-rw-r--r--Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp3650
-rw-r--r--Source/JavaScriptCore/dfg/DFGByteCodeParser.h11
-rw-r--r--Source/JavaScriptCore/dfg/DFGCFAPhase.cpp71
-rw-r--r--Source/JavaScriptCore/dfg/DFGCFAPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp121
-rw-r--r--Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp151
-rw-r--r--Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGCSEPhase.cpp1845
-rw-r--r--Source/JavaScriptCore/dfg/DFGCSEPhase.h26
-rw-r--r--Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h4
-rw-r--r--Source/JavaScriptCore/dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h83
-rw-r--r--Source/JavaScriptCore/dfg/DFGCapabilities.cpp91
-rw-r--r--Source/JavaScriptCore/dfg/DFGCapabilities.h79
-rw-r--r--Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp92
-rw-r--r--Source/JavaScriptCore/dfg/DFGCleanUpPhase.h43
-rw-r--r--Source/JavaScriptCore/dfg/DFGClobberSet.cpp23
-rw-r--r--Source/JavaScriptCore/dfg/DFGClobberSet.h8
-rw-r--r--Source/JavaScriptCore/dfg/DFGClobberize.cpp39
-rw-r--r--Source/JavaScriptCore/dfg/DFGClobberize.h864
-rw-r--r--Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp81
-rw-r--r--Source/JavaScriptCore/dfg/DFGCombinedLiveness.h53
-rw-r--r--Source/JavaScriptCore/dfg/DFGCommon.cpp58
-rw-r--r--Source/JavaScriptCore/dfg/DFGCommon.h155
-rw-r--r--Source/JavaScriptCore/dfg/DFGCommonData.cpp32
-rw-r--r--Source/JavaScriptCore/dfg/DFGCommonData.h27
-rw-r--r--Source/JavaScriptCore/dfg/DFGCompilationKey.cpp8
-rw-r--r--Source/JavaScriptCore/dfg/DFGCompilationKey.h3
-rw-r--r--Source/JavaScriptCore/dfg/DFGCompilationMode.cpp2
-rw-r--r--Source/JavaScriptCore/dfg/DFGCompilationMode.h11
-rw-r--r--Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp747
-rw-r--r--Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.cpp149
-rw-r--r--Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.h43
-rw-r--r--Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp8
-rw-r--r--Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGDCEPhase.cpp219
-rw-r--r--Source/JavaScriptCore/dfg/DFGDCEPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp36
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h15
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredStructureChains.cpp (renamed from Source/JavaScriptCore/dfg/DFGTransition.cpp)24
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredStructureChains.h (renamed from Source/JavaScriptCore/dfg/DFGGraphSafepoint.h)33
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp19
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredTransitions.h6
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp59
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h209
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp42
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h14
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.cpp (renamed from Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.h)68
-rw-r--r--Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.h102
-rw-r--r--Source/JavaScriptCore/dfg/DFGDisassembler.cpp13
-rw-r--r--Source/JavaScriptCore/dfg/DFGDisassembler.h32
-rw-r--r--Source/JavaScriptCore/dfg/DFGDoesGC.cpp257
-rw-r--r--Source/JavaScriptCore/dfg/DFGDoesGC.h43
-rw-r--r--Source/JavaScriptCore/dfg/DFGDominators.cpp466
-rw-r--r--Source/JavaScriptCore/dfg/DFGDominators.h172
-rw-r--r--Source/JavaScriptCore/dfg/DFGDriver.cpp65
-rw-r--r--Source/JavaScriptCore/dfg/DFGDriver.h11
-rw-r--r--Source/JavaScriptCore/dfg/DFGEdge.cpp9
-rw-r--r--Source/JavaScriptCore/dfg/DFGEdge.h40
-rw-r--r--Source/JavaScriptCore/dfg/DFGEdgeDominates.h6
-rw-r--r--Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGEpoch.cpp43
-rw-r--r--Source/JavaScriptCore/dfg/DFGEpoch.h124
-rw-r--r--Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp7
-rw-r--r--Source/JavaScriptCore/dfg/DFGFailedFinalizer.h3
-rw-r--r--Source/JavaScriptCore/dfg/DFGFiltrationResult.h16
-rw-r--r--Source/JavaScriptCore/dfg/DFGFinalizer.cpp1
-rw-r--r--Source/JavaScriptCore/dfg/DFGFinalizer.h3
-rw-r--r--Source/JavaScriptCore/dfg/DFGFixupPhase.cpp1432
-rw-r--r--Source/JavaScriptCore/dfg/DFGFixupPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGFlushFormat.cpp7
-rw-r--r--Source/JavaScriptCore/dfg/DFGFlushFormat.h36
-rw-r--r--Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.cpp208
-rw-r--r--Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.h (renamed from Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.h)16
-rw-r--r--Source/JavaScriptCore/dfg/DFGFlushedAt.cpp6
-rw-r--r--Source/JavaScriptCore/dfg/DFGFlushedAt.h4
-rw-r--r--Source/JavaScriptCore/dfg/DFGForAllKills.h190
-rw-r--r--Source/JavaScriptCore/dfg/DFGFrozenValue.cpp55
-rw-r--r--Source/JavaScriptCore/dfg/DFGFrozenValue.h129
-rw-r--r--Source/JavaScriptCore/dfg/DFGFunctionWhitelist.cpp115
-rw-r--r--Source/JavaScriptCore/dfg/DFGGenerationInfo.h15
-rw-r--r--Source/JavaScriptCore/dfg/DFGGraph.cpp992
-rw-r--r--Source/JavaScriptCore/dfg/DFGGraph.h844
-rw-r--r--Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp48
-rw-r--r--Source/JavaScriptCore/dfg/DFGHeapLocation.cpp150
-rw-r--r--Source/JavaScriptCore/dfg/DFGHeapLocation.h164
-rw-r--r--Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp175
-rw-r--r--Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h19
-rw-r--r--Source/JavaScriptCore/dfg/DFGInsertionSet.h98
-rw-r--r--Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.cpp404
-rw-r--r--Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.cpp1729
-rw-r--r--Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.h47
-rw-r--r--Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp23
-rw-r--r--Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITCode.cpp38
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITCode.h17
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITCompiler.cpp261
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITCompiler.h105
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp30
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITFinalizer.h7
-rw-r--r--Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp1
-rw-r--r--Source/JavaScriptCore/dfg/DFGLICMPhase.cpp100
-rw-r--r--Source/JavaScriptCore/dfg/DFGLICMPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp46
-rw-r--r--Source/JavaScriptCore/dfg/DFGLazyJSValue.h37
-rw-r--r--Source/JavaScriptCore/dfg/DFGLazyNode.cpp47
-rw-r--r--Source/JavaScriptCore/dfg/DFGLazyNode.h187
-rw-r--r--Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp43
-rw-r--r--Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGLongLivedState.cpp2
-rw-r--r--Source/JavaScriptCore/dfg/DFGLongLivedState.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp23
-rw-r--r--Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGMayExit.cpp133
-rw-r--r--Source/JavaScriptCore/dfg/DFGMayExit.h46
-rw-r--r--Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp53
-rw-r--r--Source/JavaScriptCore/dfg/DFGMinifiedGraph.h18
-rw-r--r--Source/JavaScriptCore/dfg/DFGMinifiedID.h11
-rw-r--r--Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp13
-rw-r--r--Source/JavaScriptCore/dfg/DFGMinifiedNode.h37
-rw-r--r--Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp146
-rw-r--r--Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h44
-rw-r--r--Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp99
-rw-r--r--Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.h154
-rw-r--r--Source/JavaScriptCore/dfg/DFGNaiveDominators.cpp135
-rw-r--r--Source/JavaScriptCore/dfg/DFGNaiveDominators.h71
-rw-r--r--Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp10
-rw-r--r--Source/JavaScriptCore/dfg/DFGNaturalLoops.h11
-rw-r--r--Source/JavaScriptCore/dfg/DFGNode.cpp155
-rw-r--r--Source/JavaScriptCore/dfg/DFGNode.h1269
-rw-r--r--Source/JavaScriptCore/dfg/DFGNodeAllocator.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGNodeFlags.cpp36
-rw-r--r--Source/JavaScriptCore/dfg/DFGNodeFlags.h105
-rw-r--r--Source/JavaScriptCore/dfg/DFGNodeOrigin.h68
-rw-r--r--Source/JavaScriptCore/dfg/DFGNodeType.h243
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp186
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h24
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSREntry.cpp157
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSREntry.h8
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp50
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExit.cpp2
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExit.h10
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp10
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitBase.h12
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp85
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h25
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp227
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp220
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp183
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h4
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp50
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h53
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp1
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp6
-rw-r--r--Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp2136
-rw-r--r--Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.h46
-rw-r--r--Source/JavaScriptCore/dfg/DFGObjectMaterializationData.cpp63
-rw-r--r--Source/JavaScriptCore/dfg/DFGObjectMaterializationData.h77
-rw-r--r--Source/JavaScriptCore/dfg/DFGOperations.cpp595
-rw-r--r--Source/JavaScriptCore/dfg/DFGOperations.h47
-rw-r--r--Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp188
-rw-r--r--Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.h43
-rw-r--r--Source/JavaScriptCore/dfg/DFGPhase.cpp10
-rw-r--r--Source/JavaScriptCore/dfg/DFGPhase.h8
-rw-r--r--Source/JavaScriptCore/dfg/DFGPhiChildren.h92
-rw-r--r--Source/JavaScriptCore/dfg/DFGPlan.cpp429
-rw-r--r--Source/JavaScriptCore/dfg/DFGPlan.h50
-rw-r--r--Source/JavaScriptCore/dfg/DFGPrePostNumbering.cpp89
-rw-r--r--Source/JavaScriptCore/dfg/DFGPrePostNumbering.h108
-rw-r--r--Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h175
-rw-r--r--Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp2
-rw-r--r--Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp445
-rw-r--r--Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h4
-rw-r--r--Source/JavaScriptCore/dfg/DFGPromoteHeapAccess.h101
-rw-r--r--Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp113
-rw-r--r--Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h220
-rw-r--r--Source/JavaScriptCore/dfg/DFGPureValue.cpp52
-rw-r--r--Source/JavaScriptCore/dfg/DFGPureValue.h145
-rw-r--r--Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp556
-rw-r--r--Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.h46
-rw-r--r--Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.cpp (renamed from Source/JavaScriptCore/dfg/DFGPhiChildren.cpp)55
-rw-r--r--Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.h (renamed from Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.h)21
-rw-r--r--Source/JavaScriptCore/dfg/DFGSSACalculator.cpp150
-rw-r--r--Source/JavaScriptCore/dfg/DFGSSACalculator.h263
-rw-r--r--Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp601
-rw-r--r--Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h11
-rw-r--r--Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp35
-rw-r--r--Source/JavaScriptCore/dfg/DFGSafeToExecute.h186
-rw-r--r--Source/JavaScriptCore/dfg/DFGSafepoint.cpp127
-rw-r--r--Source/JavaScriptCore/dfg/DFGSafepoint.h89
-rw-r--r--Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGScannable.h50
-rw-r--r--Source/JavaScriptCore/dfg/DFGScoreBoard.h16
-rw-r--r--Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h84
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp2908
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h616
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp2386
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp2806
-rw-r--r--Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp166
-rw-r--r--Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.cpp108
-rw-r--r--Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.h53
-rw-r--r--Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.cpp155
-rw-r--r--Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.h (renamed from Source/JavaScriptCore/dfg/DFGFunctionWhitelist.h)34
-rw-r--r--Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.cpp528
-rw-r--r--Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp217
-rw-r--r--Source/JavaScriptCore/dfg/DFGStructureAbstractValue.cpp399
-rw-r--r--Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h371
-rw-r--r--Source/JavaScriptCore/dfg/DFGStructureClobberState.h73
-rw-r--r--Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.cpp187
-rw-r--r--Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.h54
-rw-r--r--Source/JavaScriptCore/dfg/DFGThreadData.cpp49
-rw-r--r--Source/JavaScriptCore/dfg/DFGThunks.cpp46
-rw-r--r--Source/JavaScriptCore/dfg/DFGThunks.h5
-rw-r--r--Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp92
-rw-r--r--Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp8
-rw-r--r--Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h5
-rw-r--r--Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp29
-rw-r--r--Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h5
-rw-r--r--Source/JavaScriptCore/dfg/DFGTransition.h68
-rw-r--r--Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp66
-rw-r--r--Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp5
-rw-r--r--Source/JavaScriptCore/dfg/DFGUnificationPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGUseKind.cpp81
-rw-r--r--Source/JavaScriptCore/dfg/DFGUseKind.h117
-rw-r--r--Source/JavaScriptCore/dfg/DFGValidate.cpp264
-rw-r--r--Source/JavaScriptCore/dfg/DFGValidate.h4
-rw-r--r--Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h (renamed from Source/JavaScriptCore/dfg/DFGThreadData.h)37
-rw-r--r--Source/JavaScriptCore/dfg/DFGValueSource.cpp24
-rw-r--r--Source/JavaScriptCore/dfg/DFGValueSource.h19
-rw-r--r--Source/JavaScriptCore/dfg/DFGValueStrength.cpp51
-rw-r--r--Source/JavaScriptCore/dfg/DFGValueStrength.h70
-rw-r--r--Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp321
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp222
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableAccessData.h207
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp7
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h4
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableEvent.cpp10
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableEvent.h23
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp42
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableEventStream.h7
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariadicFunction.h57
-rw-r--r--Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp8
-rw-r--r--Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp106
-rw-r--r--Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGWorklist.cpp252
-rw-r--r--Source/JavaScriptCore/dfg/DFGWorklist.h81
303 files changed, 15177 insertions, 35182 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp b/Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp
index 1e1101902..ad597aaea 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace JSC { namespace DFG {
void AbstractHeap::Payload::dump(PrintStream& out) const
@@ -43,7 +41,7 @@ void AbstractHeap::Payload::dump(PrintStream& out) const
void AbstractHeap::dump(PrintStream& out) const
{
out.print(kind());
- if (kind() == InvalidAbstractHeap || kind() == World || kind() == Heap || payload().isTop())
+ if (kind() == InvalidAbstractHeap || kind() == World || payload().isTop())
return;
out.print("(", payload(), ")");
}
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractHeap.h b/Source/JavaScriptCore/dfg/DFGAbstractHeap.h
index 4dafbc1cb..b42b0bbf1 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractHeap.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractHeap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGAbstractHeap_h
#define DFGAbstractHeap_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "VirtualRegister.h"
@@ -34,45 +36,45 @@
namespace JSC { namespace DFG {
-// Implements a four-level type hierarchy:
+// Implements a three-level type hierarchy:
// - World is the supertype of all of the things.
-// - Stack with a TOP payload is a direct subtype of World
-// - Stack with a non-TOP payload is a direct subtype of Stack with a TOP payload.
-// - Heap is a direct subtype of World.
-// - Any other kind with TOP payload is the direct subtype of Heap.
-// - Any other kind with non-TOP payload is the direct subtype of the same kind with a TOP payload.
+// - Kind with TOP payload is the direct subtype of World.
+// - Kind with non-TOP payload is the direct subtype of its corresponding TOP Kind.
#define FOR_EACH_ABSTRACT_HEAP_KIND(macro) \
macro(InvalidAbstractHeap) \
macro(World) \
- macro(Stack) \
- macro(Heap) \
+ macro(Arguments_numArguments) \
+ macro(Arguments_overrideLength) \
+ macro(Arguments_registers) \
+ macro(Arguments_slowArguments) \
+ macro(ArrayBuffer_data) \
+ macro(Butterfly_arrayBuffer) \
macro(Butterfly_publicLength) \
macro(Butterfly_vectorLength) \
- macro(GetterSetter_getter) \
- macro(GetterSetter_setter) \
- macro(JSCell_structureID) \
- macro(JSCell_indexingType) \
- macro(JSCell_typeInfoFlags) \
- macro(JSCell_typeInfoType) \
+ macro(JSArrayBufferView_length) \
+ macro(JSArrayBufferView_mode) \
+ macro(JSArrayBufferView_vector) \
+ macro(JSCell_structure) \
+ macro(JSFunction_executable) \
+ macro(JSFunction_scopeChain) \
macro(JSObject_butterfly) \
- macro(JSPropertyNameEnumerator_cachedPropertyNames) \
+ macro(JSVariableObject_registers) \
macro(NamedProperties) \
macro(IndexedInt32Properties) \
macro(IndexedDoubleProperties) \
macro(IndexedContiguousProperties) \
- macro(IndexedArrayStorageProperties) \
macro(ArrayStorageProperties) \
- macro(DirectArgumentsProperties) \
- macro(ScopeProperties) \
+ macro(Variables) \
macro(TypedArrayProperties) \
- macro(HeapObjectCount) /* Used to reflect the fact that some allocations reveal object identity */\
+ macro(GCState) \
+ macro(BarrierState) \
macro(RegExpState) \
macro(InternalState) \
macro(Absolute) \
/* Use this for writes only, to indicate that this may fire watchpoints. Usually this is never directly written but instead we test to see if a node clobbers this; it just so happens that you have to write world to clobber it. */\
macro(Watchpoint_fire) \
- /* Use these for reads only, just to indicate that if the world got clobbered, then this operation will not work. */\
+ /* Use this for reads only, just to indicate that if the world got clobbered, then this operation will not work. */\
macro(MiscFields) \
/* Use this for writes only, just to indicate that hoisting the node is invalid. This works because we don't hoist anything that has any side effects at all. */\
macro(SideState)
@@ -131,11 +133,6 @@ public:
return m_value;
}
- int32_t value32() const
- {
- return static_cast<int32_t>(value());
- }
-
bool operator==(const Payload& other) const
{
return m_isTop == other.m_isTop
@@ -190,7 +187,7 @@ public:
AbstractHeap(AbstractHeapKind kind, Payload payload)
{
- ASSERT(kind != InvalidAbstractHeap && kind != World && kind != Heap && kind != SideState);
+ ASSERT(kind != InvalidAbstractHeap && kind != World);
m_value = encode(kind, payload);
}
@@ -208,49 +205,32 @@ public:
return payloadImpl();
}
- AbstractHeap supertype() const
+ bool isDisjoint(const AbstractHeap& other)
{
ASSERT(kind() != InvalidAbstractHeap);
- switch (kind()) {
- case World:
- return AbstractHeap();
- case Heap:
- case SideState:
- return World;
- default:
- if (payload().isTop()) {
- if (kind() == Stack)
- return World;
- return Heap;
- }
- return AbstractHeap(kind());
- }
- }
-
- bool isStrictSubtypeOf(const AbstractHeap& other) const
- {
- AbstractHeap current = *this;
- while (current.kind() != World) {
- current = current.supertype();
- if (current == other)
- return true;
- }
- return false;
- }
-
- bool isSubtypeOf(const AbstractHeap& other) const
- {
- return *this == other || isStrictSubtypeOf(other);
+ ASSERT(other.kind() != InvalidAbstractHeap);
+ if (kind() == World)
+ return false;
+ if (other.kind() == World)
+ return false;
+ if (kind() != other.kind())
+ return true;
+ return payload().isDisjoint(other.payload());
}
- bool overlaps(const AbstractHeap& other) const
+ bool overlaps(const AbstractHeap& other)
{
- return *this == other || isStrictSubtypeOf(other) || other.isStrictSubtypeOf(*this);
+ return !isDisjoint(other);
}
- bool isDisjoint(const AbstractHeap& other) const
+ AbstractHeap supertype() const
{
- return !overlaps(other);
+ ASSERT(kind() != InvalidAbstractHeap);
+ if (kind() == World)
+ return AbstractHeap();
+ if (payload().isTop())
+ return World;
+ return AbstractHeap(kind());
}
unsigned hash() const
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h b/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h
index b3ebd68f1..eb4e5219f 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,20 +26,21 @@
#ifndef DFGAbstractInterpreter_h
#define DFGAbstractInterpreter_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAbstractValue.h"
#include "DFGBranchDirection.h"
#include "DFGGraph.h"
#include "DFGNode.h"
-#include "DFGPhiChildren.h"
namespace JSC { namespace DFG {
template<typename AbstractStateType>
class AbstractInterpreter {
public:
- AbstractInterpreter(Graph&, AbstractStateType&);
+ AbstractInterpreter(Graph&, AbstractStateType& state);
~AbstractInterpreter();
AbstractValue& forNode(Node* node)
@@ -81,15 +82,22 @@ public:
//
// This is guaranteed to be equivalent to doing:
//
- // state.startExecuting()
- // state.executeEdges(index);
- // result = state.executeEffects(index);
+ // if (state.startExecuting(index)) {
+ // state.executeEdges(index);
+ // result = state.executeEffects(index);
+ // } else
+ // result = true;
bool execute(unsigned indexInBlock);
bool execute(Node*);
- // Indicate the start of execution of a node. It resets any state in the node
- // that is progressively built up by executeEdges() and executeEffects().
- void startExecuting();
+ // Indicate the start of execution of the node. It resets any state in the node,
+ // that is progressively built up by executeEdges() and executeEffects(). In
+ // particular, this resets canExit(), so if you want to "know" between calls of
+ // startExecuting() and executeEdges()/Effects() whether the last run of the
+ // analysis concluded that the node can exit, you should probably set that
+ // information aside prior to calling startExecuting().
+ bool startExecuting(Node*);
+ bool startExecuting(unsigned indexInBlock);
// Abstractly execute the edges of the given node. This runs filterEdgeByUse()
// on all edges of the node. You can skip this step, if you have already used
@@ -97,14 +105,10 @@ public:
void executeEdges(Node*);
void executeEdges(unsigned indexInBlock);
- ALWAYS_INLINE void filterEdgeByUse(Edge& edge)
+ ALWAYS_INLINE void filterEdgeByUse(Node* node, Edge& edge)
{
ASSERT(mayHaveTypeCheck(edge.useKind()) || !needsTypeCheck(edge));
- filterByType(edge, typeFilterFor(edge.useKind()));
- }
- ALWAYS_INLINE void filterEdgeByUse(Node*, Edge& edge)
- {
- filterEdgeByUse(edge);
+ filterByType(node, edge, typeFilterFor(edge.useKind()));
}
// Abstractly execute the effects of the given node. This changes the abstract
@@ -112,7 +116,6 @@ public:
bool executeEffects(unsigned indexInBlock);
bool executeEffects(unsigned clobberLimit, Node*);
- void dump(PrintStream& out) const;
void dump(PrintStream& out);
template<typename T>
@@ -134,7 +137,7 @@ public:
}
template<typename T>
- FiltrationResult filterByValue(T node, FrozenValue value)
+ FiltrationResult filterByValue(T node, JSValue value)
{
return filterByValue(forNode(node), value);
}
@@ -142,20 +145,12 @@ public:
FiltrationResult filter(AbstractValue&, const StructureSet&);
FiltrationResult filterArrayModes(AbstractValue&, ArrayModes);
FiltrationResult filter(AbstractValue&, SpeculatedType);
- FiltrationResult filterByValue(AbstractValue&, FrozenValue);
-
- PhiChildren* phiChildren() { return m_phiChildren.get(); }
+ FiltrationResult filterByValue(AbstractValue&, JSValue);
private:
void clobberWorld(const CodeOrigin&, unsigned indexInBlock);
-
- template<typename Functor>
- void forAllValues(unsigned indexInBlock, Functor&);
-
+ void clobberCapturedVars(const CodeOrigin&);
void clobberStructures(unsigned indexInBlock);
- void observeTransition(unsigned indexInBlock, Structure* from, Structure* to);
- void observeTransitions(unsigned indexInBlock, const TransitionVector&);
- void setDidClobber();
enum BooleanResult {
UnknownBooleanResult,
@@ -164,25 +159,19 @@ private:
};
BooleanResult booleanResult(Node*, AbstractValue&);
- void setBuiltInConstant(Node* node, FrozenValue value)
- {
- AbstractValue& abstractValue = forNode(node);
- abstractValue.set(m_graph, value, m_state.structureClobberState());
- abstractValue.fixTypeForRepresentation(m_graph, node);
- }
-
- void setConstant(Node* node, FrozenValue value)
+ void setConstant(Node* node, JSValue value)
{
- setBuiltInConstant(node, value);
+ forNode(node).set(m_graph, value);
m_state.setFoundConstants(true);
}
- ALWAYS_INLINE void filterByType(Edge& edge, SpeculatedType type)
+ ALWAYS_INLINE void filterByType(Node* node, Edge& edge, SpeculatedType type)
{
AbstractValue& value = forNode(edge);
- if (!value.isType(type))
+ if (!value.isType(type)) {
+ node->setCanExit(true);
edge.setProofStatus(NeedsCheck);
- else
+ } else
edge.setProofStatus(IsProved);
filter(value, type);
@@ -194,7 +183,6 @@ private:
CodeBlock* m_codeBlock;
Graph& m_graph;
AbstractStateType& m_state;
- std::unique_ptr<PhiChildren> m_phiChildren;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
index 932534826..3f68aced1 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,13 +26,12 @@
#ifndef DFGAbstractInterpreterInlines_h
#define DFGAbstractInterpreterInlines_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAbstractInterpreter.h"
#include "GetByIdStatus.h"
-#include "GetterSetter.h"
-#include "JITOperations.h"
-#include "MathCommon.h"
#include "Operations.h"
#include "PutByIdStatus.h"
#include "StringObject.h"
@@ -45,8 +44,6 @@ AbstractInterpreter<AbstractStateType>::AbstractInterpreter(Graph& graph, Abstra
, m_graph(graph)
, m_state(state)
{
- if (m_graph.m_form == SSA)
- m_phiChildren = std::make_unique<PhiChildren>(m_graph);
}
template<typename AbstractStateType>
@@ -61,23 +58,17 @@ AbstractInterpreter<AbstractStateType>::booleanResult(
{
JSValue childConst = value.value();
if (childConst) {
- if (childConst.toBoolean(m_codeBlock->globalObjectFor(node->origin.semantic)->globalExec()))
+ if (childConst.toBoolean(m_codeBlock->globalObjectFor(node->codeOrigin)->globalExec()))
return DefinitelyTrue;
return DefinitelyFalse;
}
// Next check if we can fold because we know that the source is an object or string and does not equal undefined.
- if (isCellSpeculation(value.m_type) && !value.m_structure.isTop()) {
- bool allTrue = true;
- for (unsigned i = value.m_structure.size(); i--;) {
- Structure* structure = value.m_structure[i];
- if (structure->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->origin.semantic))
- || structure->typeInfo().type() == StringType) {
- allTrue = false;
- break;
- }
- }
- if (allTrue)
+ if (isCellSpeculation(value.m_type)
+ && value.m_currentKnownStructure.hasSingleton()) {
+ Structure* structure = value.m_currentKnownStructure.singleton();
+ if (!structure->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->codeOrigin))
+ && structure->typeInfo().type() != StringType)
return DefinitelyTrue;
}
@@ -85,12 +76,22 @@ AbstractInterpreter<AbstractStateType>::booleanResult(
}
template<typename AbstractStateType>
-void AbstractInterpreter<AbstractStateType>::startExecuting()
+bool AbstractInterpreter<AbstractStateType>::startExecuting(Node* node)
{
ASSERT(m_state.block());
ASSERT(m_state.isValid());
m_state.setDidClobber(false);
+
+ node->setCanExit(false);
+
+ return node->shouldGenerate();
+}
+
+template<typename AbstractStateType>
+bool AbstractInterpreter<AbstractStateType>::startExecuting(unsigned indexInBlock)
+{
+ return startExecuting(m_state.block()->at(indexInBlock));
}
template<typename AbstractStateType>
@@ -106,12 +107,9 @@ void AbstractInterpreter<AbstractStateType>::executeEdges(unsigned indexInBlock)
}
template<typename AbstractStateType>
-void AbstractInterpreter<AbstractStateType>::verifyEdge(Node* node, Edge edge)
+void AbstractInterpreter<AbstractStateType>::verifyEdge(Node*, Edge edge)
{
- if (!(forNode(edge).m_type & ~typeFilterFor(edge.useKind())))
- return;
-
- DFG_CRASH(m_graph, node, toCString("Edge verification error: ", node, "->", edge, " was expected to have type ", SpeculationDump(typeFilterFor(edge.useKind())), " but has type ", SpeculationDump(forNode(edge).m_type), " (", forNode(edge).m_type, ")").data());
+ RELEASE_ASSERT(!(forNode(edge).m_type & ~typeFilterFor(edge.useKind())));
}
template<typename AbstractStateType>
@@ -130,40 +128,54 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
switch (node->op()) {
case JSConstant:
- case DoubleConstant:
- case Int52Constant: {
- setBuiltInConstant(node, *node->constant());
+ case WeakJSConstant:
+ case PhantomArguments: {
+ forNode(node).set(m_graph, m_graph.valueOfJSConstant(node));
break;
}
case Identity: {
forNode(node) = forNode(node->child1());
- if (forNode(node).value())
- m_state.setFoundConstants(true);
+ break;
+ }
+
+ case GetArgument: {
+ ASSERT(m_graph.m_form == SSA);
+ VariableAccessData* variable = node->variableAccessData();
+ AbstractValue& value = m_state.variables().operand(variable->local().offset());
+ ASSERT(value.isHeapTop());
+ FiltrationResult result =
+ value.filter(typeFilterFor(useKindFor(variable->flushFormat())));
+ ASSERT_UNUSED(result, result == FiltrationOK);
+ forNode(node) = value;
break;
}
case ExtractOSREntryLocal: {
- forNode(node).makeBytecodeTop();
+ if (!(node->unlinkedLocal().isArgument())
+ && m_graph.m_lazyVars.get(node->unlinkedLocal().toLocal())) {
+ // This is kind of pessimistic - we could know in some cases that the
+ // DFG code at the point of the OSR had already initialized the lazy
+ // variable. But maybe this is fine, since we're inserting OSR
+ // entrypoints very early in the pipeline - so any lazy initializations
+ // ought to be hoisted out anyway.
+ forNode(node).makeBytecodeTop();
+ } else
+ forNode(node).makeHeapTop();
break;
}
case GetLocal: {
VariableAccessData* variableAccessData = node->variableAccessData();
+ if (variableAccessData->prediction() == SpecNone) {
+ m_state.setIsValid(false);
+ break;
+ }
AbstractValue value = m_state.variables().operand(variableAccessData->local().offset());
- // The value in the local should already be checked.
- DFG_ASSERT(m_graph, node, value.isType(typeFilterFor(variableAccessData->flushFormat())));
- if (value.value())
- m_state.setFoundConstants(true);
- forNode(node) = value;
- break;
- }
-
- case GetStack: {
- StackAccessData* data = node->stackAccessData();
- AbstractValue value = m_state.variables().operand(data->local);
- // The value in the local should already be checked.
- DFG_ASSERT(m_graph, node, value.isType(typeFilterFor(data->format)));
+ if (!variableAccessData->isCaptured()) {
+ if (value.isClear())
+ node->setCanExit(true);
+ }
if (value.value())
m_state.setFoundConstants(true);
forNode(node) = value;
@@ -179,12 +191,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
case SetLocal: {
- m_state.variables().operand(node->local()) = forNode(node->child1());
- break;
- }
-
- case PutStack: {
- m_state.variables().operand(node->stackAccessData()->local) = forNode(node->child1());
+ m_state.variables().operand(node->local().offset()) = forNode(node->child1());
break;
}
@@ -195,31 +202,10 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
- case KillStack: {
- // This is just a hint telling us that the OSR state of the local is no longer inside the
- // flushed data.
- break;
- }
-
case SetArgument:
- // Assert that the state of arguments has been set. SetArgument means that someone set
- // the argument values out-of-band, and currently this always means setting to a
- // non-clear value.
- ASSERT(!m_state.variables().operand(node->local()).isClear());
- break;
-
- case LoadVarargs:
- case ForwardVarargs: {
- // FIXME: ForwardVarargs should check if the count becomes known, and if it does, it should turn
- // itself into a straight-line sequence of GetStack/PutStack.
- // https://bugs.webkit.org/show_bug.cgi?id=143071
- clobberWorld(node->origin.semantic, clobberLimit);
- LoadVarargsData* data = node->loadVarargsData();
- m_state.variables().operand(data->count).setType(SpecInt32);
- for (unsigned i = data->limit - 1; i--;)
- m_state.variables().operand(data->start.offset() + i).makeHeapTop();
+ // Assert that the state of arguments has been set.
+ ASSERT(!m_state.block()->valuesAtHead.operand(node->local()).isClear());
break;
- }
case BitAnd:
case BitOr:
@@ -257,14 +243,6 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
break;
}
-
- if (node->op() == BitAnd
- && (isBoolInt32Speculation(forNode(node->child1()).m_type) ||
- isBoolInt32Speculation(forNode(node->child2()).m_type))) {
- forNode(node).setType(SpecBoolInt32);
- break;
- }
-
forNode(node).setType(SpecInt32);
break;
}
@@ -277,7 +255,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
setConstant(node, jsNumber(value));
break;
}
- forNode(node).setType(SpecInt52AsDouble);
+ forNode(node).setType(SpecDouble);
break;
}
if (child && child.isInt32()) {
@@ -288,26 +266,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
}
forNode(node).setType(SpecInt32);
- break;
- }
-
- case BooleanToNumber: {
- JSValue concreteValue = forNode(node->child1()).value();
- if (concreteValue) {
- if (concreteValue.isBoolean())
- setConstant(node, jsNumber(concreteValue.asBoolean()));
- else
- setConstant(node, *m_graph.freeze(concreteValue));
- break;
- }
- AbstractValue& value = forNode(node);
- value = forNode(node->child1());
- if (node->child1().useKind() == UntypedUse && !(value.m_type & ~SpecBoolean))
- m_state.setFoundConstants(true);
- if (value.m_type & SpecBoolean) {
- value.merge(SpecBoolInt32);
- value.filter(~SpecBoolean);
- }
+ node->setCanExit(true);
break;
}
@@ -321,6 +280,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
}
+ node->setCanExit(true);
forNode(node).setType(SpecInt32);
break;
}
@@ -336,86 +296,55 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
if (child.isBoolean()) {
- setConstant(node, jsNumber(child.asBoolean()));
- break;
- }
- if (child.isUndefinedOrNull()) {
- setConstant(node, jsNumber(0));
+ setConstant(node, JSValue(child.asBoolean()));
break;
}
}
- if (isBooleanSpeculation(forNode(node->child1()).m_type)) {
- forNode(node).setType(SpecBoolInt32);
- break;
- }
-
forNode(node).setType(SpecInt32);
break;
}
- case DoubleRep: {
+ case Int32ToDouble: {
JSValue child = forNode(node->child1()).value();
if (child && child.isNumber()) {
- setConstant(node, jsDoubleNumber(child.asNumber()));
- break;
- }
-
- SpeculatedType type = forNode(node->child1()).m_type;
- switch (node->child1().useKind()) {
- case NotCellUse: {
- if (type & SpecOther) {
- type &= ~SpecOther;
- type |= SpecDoublePureNaN | SpecBoolInt32; // Null becomes zero, undefined becomes NaN.
- }
- if (type & SpecBoolean) {
- type &= ~SpecBoolean;
- type |= SpecBoolInt32; // True becomes 1, false becomes 0.
- }
- type &= SpecBytecodeNumber;
+ setConstant(node, JSValue(JSValue::EncodeAsDouble, child.asNumber()));
break;
}
-
- case Int52RepUse:
- case NumberUse:
- case RealNumberUse:
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
- forNode(node).setType(type);
- forNode(node).fixTypeForRepresentation(m_graph, node);
+ if (isInt32Speculation(forNode(node->child1()).m_type))
+ forNode(node).setType(SpecDoubleReal);
+ else
+ forNode(node).setType(SpecDouble);
break;
}
- case Int52Rep: {
+ case Int52ToDouble: {
JSValue child = forNode(node->child1()).value();
- if (child && child.isMachineInt()) {
+ if (child && child.isNumber()) {
setConstant(node, child);
break;
}
-
- forNode(node).setType(SpecInt32);
+ forNode(node).setType(SpecDouble);
break;
}
- case ValueRep: {
- JSValue value = forNode(node->child1()).value();
- if (value) {
- setConstant(node, value);
+ case Int52ToValue: {
+ JSValue child = forNode(node->child1()).value();
+ if (child && child.isNumber()) {
+ setConstant(node, child);
break;
}
-
- forNode(node).setType(m_graph, forNode(node->child1()).m_type & ~SpecDoubleImpureNaN);
- forNode(node).fixTypeForRepresentation(m_graph, node);
+ SpeculatedType type = forNode(node->child1()).m_type;
+ if (type & SpecInt52)
+ type = (type | SpecInt32 | SpecInt52AsDouble) & ~SpecInt52;
+ forNode(node).setType(type);
break;
}
case ValueAdd: {
ASSERT(node->binaryUseKind() == UntypedUse);
- clobberWorld(node->origin.semantic, clobberLimit);
- forNode(node).setType(m_graph, SpecString | SpecBytecodeNumber);
+ clobberWorld(node->codeOrigin, clobberLimit);
+ forNode(node).setType(SpecString | SpecBytecodeNumber);
break;
}
@@ -436,8 +365,10 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
}
forNode(node).setType(SpecInt32);
+ if (shouldCheckOverflow(node->arithMode()))
+ node->setCanExit(true);
break;
- case Int52RepUse:
+ case MachineIntUse:
if (left && right && left.isMachineInt() && right.isMachineInt()) {
JSValue result = jsNumber(left.asMachineInt() + right.asMachineInt());
if (result.isMachineInt()) {
@@ -445,16 +376,21 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
}
- forNode(node).setType(SpecMachineInt);
+ forNode(node).setType(SpecInt52);
+ if (!forNode(node->child1()).isType(SpecInt32)
+ || !forNode(node->child2()).isType(SpecInt32))
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (left && right && left.isNumber() && right.isNumber()) {
- setConstant(node, jsDoubleNumber(left.asNumber() + right.asNumber()));
+ setConstant(node, jsNumber(left.asNumber() + right.asNumber()));
break;
}
- forNode(node).setType(
- typeOfDoubleSum(
- forNode(node->child1()).m_type, forNode(node->child2()).m_type));
+ if (isFullRealNumberSpeculation(forNode(node->child1()).m_type)
+ && isFullRealNumberSpeculation(forNode(node->child2()).m_type))
+ forNode(node).setType(SpecDoubleReal);
+ else
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -462,19 +398,9 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
break;
}
-
- case ArithClz32: {
- JSValue operand = forNode(node->child1()).value();
- if (operand && operand.isNumber()) {
- uint32_t value = toUInt32(operand.asNumber());
- setConstant(node, jsNumber(clz32(value)));
- break;
- }
- forNode(node).setType(SpecInt32);
- break;
- }
-
+
case MakeRope: {
+ node->setCanExit(true);
forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get());
break;
}
@@ -496,8 +422,10 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
}
forNode(node).setType(SpecInt32);
+ if (shouldCheckOverflow(node->arithMode()))
+ node->setCanExit(true);
break;
- case Int52RepUse:
+ case MachineIntUse:
if (left && right && left.isMachineInt() && right.isMachineInt()) {
JSValue result = jsNumber(left.asMachineInt() - right.asMachineInt());
if (result.isMachineInt() || !shouldCheckOverflow(node->arithMode())) {
@@ -505,16 +433,17 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
}
- forNode(node).setType(SpecMachineInt);
+ forNode(node).setType(SpecInt52);
+ if (!forNode(node->child1()).isType(SpecInt32)
+ || !forNode(node->child2()).isType(SpecInt32))
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (left && right && left.isNumber() && right.isNumber()) {
- setConstant(node, jsDoubleNumber(left.asNumber() - right.asNumber()));
+ setConstant(node, jsNumber(left.asNumber() - right.asNumber()));
break;
}
- forNode(node).setType(
- typeOfDoubleDifference(
- forNode(node->child1()).m_type, forNode(node->child2()).m_type));
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -544,8 +473,10 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
}
forNode(node).setType(SpecInt32);
+ if (shouldCheckOverflow(node->arithMode()))
+ node->setCanExit(true);
break;
- case Int52RepUse:
+ case MachineIntUse:
if (child && child.isMachineInt()) {
double doubleResult;
if (shouldCheckNegativeZero(node->arithMode()))
@@ -558,16 +489,18 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
}
- forNode(node).setType(SpecMachineInt);
+ forNode(node).setType(SpecInt52);
+ if (m_state.forNode(node->child1()).couldBeType(SpecInt52))
+ node->setCanExit(true);
+ if (shouldCheckNegativeZero(node->arithMode()))
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (child && child.isNumber()) {
- setConstant(node, jsDoubleNumber(-child.asNumber()));
+ setConstant(node, jsNumber(-child.asNumber()));
break;
}
- forNode(node).setType(
- typeOfDoubleNegation(
- forNode(node->child1()).m_type));
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -596,8 +529,10 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
}
forNode(node).setType(SpecInt32);
+ if (shouldCheckOverflow(node->arithMode()))
+ node->setCanExit(true);
break;
- case Int52RepUse:
+ case MachineIntUse:
if (left && right && left.isMachineInt() && right.isMachineInt()) {
double doubleResult = left.asNumber() * right.asNumber();
if (!shouldCheckNegativeZero(node->arithMode()))
@@ -608,16 +543,19 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
}
- forNode(node).setType(SpecMachineInt);
+ forNode(node).setType(SpecInt52);
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (left && right && left.isNumber() && right.isNumber()) {
- setConstant(node, jsDoubleNumber(left.asNumber() * right.asNumber()));
+ setConstant(node, jsNumber(left.asNumber() * right.asNumber()));
break;
}
- forNode(node).setType(
- typeOfDoubleProduct(
- forNode(node->child1()).m_type, forNode(node->child2()).m_type));
+ if (isFullRealNumberSpeculation(forNode(node->child1()).m_type)
+ || isFullRealNumberSpeculation(forNode(node->child2()).m_type))
+ forNode(node).setType(SpecDoubleReal);
+ else
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -644,15 +582,14 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
}
forNode(node).setType(SpecInt32);
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (left && right && left.isNumber() && right.isNumber()) {
- setConstant(node, jsDoubleNumber(left.asNumber() / right.asNumber()));
+ setConstant(node, jsNumber(left.asNumber() / right.asNumber()));
break;
}
- forNode(node).setType(
- typeOfDoubleQuotient(
- forNode(node->child1()).m_type, forNode(node->child2()).m_type));
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -679,15 +616,14 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
}
forNode(node).setType(SpecInt32);
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (left && right && left.isNumber() && right.isNumber()) {
- setConstant(node, jsDoubleNumber(fmod(left.asNumber(), right.asNumber())));
+ setConstant(node, jsNumber(fmod(left.asNumber(), right.asNumber())));
break;
}
- forNode(node).setType(
- typeOfDoubleBinaryOp(
- forNode(node->child1()).m_type, forNode(node->child2()).m_type));
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -706,17 +642,16 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
forNode(node).setType(SpecInt32);
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (left && right && left.isNumber() && right.isNumber()) {
double a = left.asNumber();
double b = right.asNumber();
- setConstant(node, jsDoubleNumber(a < b ? a : (b <= a ? b : a + b)));
+ setConstant(node, jsNumber(a < b ? a : (b <= a ? b : a + b)));
break;
}
- forNode(node).setType(
- typeOfDoubleMinMax(
- forNode(node->child1()).m_type, forNode(node->child2()).m_type));
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -735,17 +670,16 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
forNode(node).setType(SpecInt32);
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (left && right && left.isNumber() && right.isNumber()) {
double a = left.asNumber();
double b = right.asNumber();
- setConstant(node, jsDoubleNumber(a > b ? a : (b >= a ? b : a + b)));
+ setConstant(node, jsNumber(a > b ? a : (b >= a ? b : a + b)));
break;
}
- forNode(node).setType(
- typeOfDoubleMinMax(
- forNode(node->child1()).m_type, forNode(node->child2()).m_type));
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -766,13 +700,14 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
}
forNode(node).setType(SpecInt32);
+ node->setCanExit(true);
break;
- case DoubleRepUse:
+ case NumberUse:
if (child && child.isNumber()) {
- setConstant(node, jsDoubleNumber(fabs(child.asNumber())));
+ setConstant(node, jsNumber(child.asNumber()));
break;
}
- forNode(node).setType(typeOfDoubleAbs(forNode(node->child1()).m_type));
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -780,102 +715,34 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
break;
}
-
- case ArithPow: {
- JSValue childY = forNode(node->child2()).value();
- if (childY && childY.isNumber()) {
- if (!childY.asNumber()) {
- setConstant(node, jsDoubleNumber(1));
- break;
- }
-
- JSValue childX = forNode(node->child1()).value();
- if (childX && childX.isNumber()) {
- setConstant(node, jsDoubleNumber(operationMathPow(childX.asNumber(), childY.asNumber())));
- break;
- }
- }
- forNode(node).setType(typeOfDoublePow(forNode(node->child1()).m_type, forNode(node->child2()).m_type));
- break;
- }
-
- case ArithRound: {
- JSValue operand = forNode(node->child1()).value();
- if (operand && operand.isNumber()) {
- double roundedValue = jsRound(operand.asNumber());
-
- if (producesInteger(node->arithRoundingMode())) {
- int32_t roundedValueAsInt32 = static_cast<int32_t>(roundedValue);
- if (roundedValueAsInt32 == roundedValue) {
- if (shouldCheckNegativeZero(node->arithRoundingMode())) {
- if (roundedValueAsInt32 || !std::signbit(roundedValue)) {
- setConstant(node, jsNumber(roundedValueAsInt32));
- break;
- }
- } else {
- setConstant(node, jsNumber(roundedValueAsInt32));
- break;
- }
- }
- } else {
- setConstant(node, jsDoubleNumber(roundedValue));
- break;
- }
- }
- if (producesInteger(node->arithRoundingMode()))
- forNode(node).setType(SpecInt32);
- else
- forNode(node).setType(typeOfDoubleRounding(forNode(node->child1()).m_type));
- break;
- }
case ArithSqrt: {
JSValue child = forNode(node->child1()).value();
if (child && child.isNumber()) {
- setConstant(node, jsDoubleNumber(sqrt(child.asNumber())));
+ setConstant(node, jsNumber(sqrt(child.asNumber())));
break;
}
- forNode(node).setType(typeOfDoubleUnaryOp(forNode(node->child1()).m_type));
- break;
- }
-
- case ArithFRound: {
- JSValue child = forNode(node->child1()).value();
- if (child && child.isNumber()) {
- setConstant(node, jsDoubleNumber(static_cast<float>(child.asNumber())));
- break;
- }
- forNode(node).setType(typeOfDoubleRounding(forNode(node->child1()).m_type));
+ forNode(node).setType(SpecDouble);
break;
}
case ArithSin: {
JSValue child = forNode(node->child1()).value();
if (child && child.isNumber()) {
- setConstant(node, jsDoubleNumber(sin(child.asNumber())));
+ setConstant(node, jsNumber(sin(child.asNumber())));
break;
}
- forNode(node).setType(typeOfDoubleUnaryOp(forNode(node->child1()).m_type));
+ forNode(node).setType(SpecDouble);
break;
}
case ArithCos: {
JSValue child = forNode(node->child1()).value();
if (child && child.isNumber()) {
- setConstant(node, jsDoubleNumber(cos(child.asNumber())));
- break;
- }
- forNode(node).setType(typeOfDoubleUnaryOp(forNode(node->child1()).m_type));
- break;
- }
-
- case ArithLog: {
- JSValue child = forNode(node->child1()).value();
- if (child && child.isNumber()) {
- setConstant(node, jsDoubleNumber(log(child.asNumber())));
+ setConstant(node, jsNumber(cos(child.asNumber())));
break;
}
- forNode(node).setType(typeOfDoubleUnaryOp(forNode(node->child1()).m_type));
+ forNode(node).setType(SpecDouble);
break;
}
@@ -888,6 +755,20 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
setConstant(node, jsBoolean(true));
break;
default:
+ switch (node->child1().useKind()) {
+ case BooleanUse:
+ case Int32Use:
+ case NumberUse:
+ case UntypedUse:
+ case StringUse:
+ break;
+ case ObjectOrOtherUse:
+ node->setCanExit(true);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
forNode(node).setType(SpecBoolean);
break;
}
@@ -899,59 +780,35 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case IsNumber:
case IsString:
case IsObject:
- case IsObjectOrNull:
case IsFunction: {
- AbstractValue child = forNode(node->child1());
- if (child.value()) {
+ node->setCanExit(
+ node->op() == IsUndefined
+ && m_graph.masqueradesAsUndefinedWatchpointIsStillValid(node->codeOrigin));
+ JSValue child = forNode(node->child1()).value();
+ if (child) {
bool constantWasSet = true;
switch (node->op()) {
case IsUndefined:
setConstant(node, jsBoolean(
- child.value().isCell()
- ? child.value().asCell()->structure()->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->origin.semantic))
- : child.value().isUndefined()));
+ child.isCell()
+ ? child.asCell()->structure()->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->codeOrigin))
+ : child.isUndefined()));
break;
case IsBoolean:
- setConstant(node, jsBoolean(child.value().isBoolean()));
+ setConstant(node, jsBoolean(child.isBoolean()));
break;
case IsNumber:
- setConstant(node, jsBoolean(child.value().isNumber()));
+ setConstant(node, jsBoolean(child.isNumber()));
break;
case IsString:
- setConstant(node, jsBoolean(isJSString(child.value())));
+ setConstant(node, jsBoolean(isJSString(child)));
break;
case IsObject:
- setConstant(node, jsBoolean(child.value().isObject()));
- break;
- case IsObjectOrNull:
- if (child.value().isObject()) {
- JSObject* object = asObject(child.value());
- if (object->type() == JSFunctionType)
- setConstant(node, jsBoolean(false));
- else if (!(object->inlineTypeFlags() & TypeOfShouldCallGetCallData))
- setConstant(node, jsBoolean(!child.value().asCell()->structure()->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->origin.semantic))));
- else {
- // FIXME: This could just call getCallData.
- // https://bugs.webkit.org/show_bug.cgi?id=144457
- constantWasSet = false;
- }
- } else
- setConstant(node, jsBoolean(child.value().isNull()));
- break;
- case IsFunction:
- if (child.value().isObject()) {
- JSObject* object = asObject(child.value());
- if (object->type() == JSFunctionType)
- setConstant(node, jsBoolean(true));
- else if (!(object->inlineTypeFlags() & TypeOfShouldCallGetCallData))
- setConstant(node, jsBoolean(false));
- else {
- // FIXME: This could just call getCallData.
- // https://bugs.webkit.org/show_bug.cgi?id=144457
- constantWasSet = false;
- }
- } else
- setConstant(node, jsBoolean(false));
+ if (child.isNull() || !child.isObject()) {
+ setConstant(node, jsBoolean(child.isNull()));
+ break;
+ }
+ constantWasSet = false;
break;
default:
constantWasSet = false;
@@ -960,131 +817,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
if (constantWasSet)
break;
}
-
- // FIXME: This code should really use AbstractValue::isType() and
- // AbstractValue::couldBeType().
- // https://bugs.webkit.org/show_bug.cgi?id=146870
-
- bool constantWasSet = false;
- switch (node->op()) {
- case IsUndefined:
- // FIXME: Use the masquerades-as-undefined watchpoint thingy.
- // https://bugs.webkit.org/show_bug.cgi?id=144456
-
- if (!(child.m_type & (SpecOther | SpecObjectOther))) {
- setConstant(node, jsBoolean(false));
- constantWasSet = true;
- break;
- }
-
- break;
- case IsBoolean:
- if (!(child.m_type & ~SpecBoolean)) {
- setConstant(node, jsBoolean(true));
- constantWasSet = true;
- break;
- }
-
- if (!(child.m_type & SpecBoolean)) {
- setConstant(node, jsBoolean(false));
- constantWasSet = true;
- break;
- }
-
- break;
- case IsNumber:
- if (!(child.m_type & ~SpecFullNumber)) {
- setConstant(node, jsBoolean(true));
- constantWasSet = true;
- break;
- }
-
- if (!(child.m_type & SpecFullNumber)) {
- setConstant(node, jsBoolean(false));
- constantWasSet = true;
- break;
- }
-
- break;
- case IsString:
- if (!(child.m_type & ~SpecString)) {
- setConstant(node, jsBoolean(true));
- constantWasSet = true;
- break;
- }
-
- if (!(child.m_type & SpecString)) {
- setConstant(node, jsBoolean(false));
- constantWasSet = true;
- break;
- }
-
- break;
- case IsObject:
- if (!(child.m_type & ~SpecObject)) {
- setConstant(node, jsBoolean(true));
- constantWasSet = true;
- break;
- }
-
- if (!(child.m_type & SpecObject)) {
- setConstant(node, jsBoolean(false));
- constantWasSet = true;
- break;
- }
-
- break;
- case IsObjectOrNull:
- // FIXME: Use the masquerades-as-undefined watchpoint thingy.
- // https://bugs.webkit.org/show_bug.cgi?id=144456
-
- // These expressions are complicated to parse. A helpful way to parse this is that
- // "!(T & ~S)" means "T is a subset of S". Conversely, "!(T & S)" means "T is a
- // disjoint set from S". Things like "T - S" means that, provided that S is a
- // subset of T, it's the "set of all things in T but not in S". Things like "T | S"
- // mean the "union of T and S".
-
- // Is the child's type an object that isn't an other-object (i.e. object that could
- // have masquaredes-as-undefined traps) and isn't a function? Then: we should fold
- // this to true.
- if (!(child.m_type & ~(SpecObject - SpecObjectOther - SpecFunction))) {
- setConstant(node, jsBoolean(true));
- constantWasSet = true;
- break;
- }
-
- // Is the child's type definitely not either of: an object that isn't a function,
- // or either undefined or null? Then: we should fold this to false. This means
- // for example that if it's any non-function object, including those that have
- // masquerades-as-undefined traps, then we don't fold. It also means we won't fold
- // if it's undefined-or-null, since the type bits don't distinguish between
- // undefined (which should fold to false) and null (which should fold to true).
- if (!(child.m_type & ((SpecObject - SpecFunction) | SpecOther))) {
- setConstant(node, jsBoolean(false));
- constantWasSet = true;
- break;
- }
-
- break;
- case IsFunction:
- if (!(child.m_type & ~SpecFunction)) {
- setConstant(node, jsBoolean(true));
- constantWasSet = true;
- break;
- }
-
- if (!(child.m_type & (SpecFunction | SpecObjectOther))) {
- setConstant(node, jsBoolean(false));
- constantWasSet = true;
- break;
- }
- break;
- default:
- break;
- }
- if (constantWasSet)
- break;
-
+
forNode(node).setType(SpecBoolean);
break;
}
@@ -1094,44 +827,48 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
JSValue child = forNode(node->child1()).value();
AbstractValue& abstractChild = forNode(node->child1());
if (child) {
- JSValue typeString = jsTypeStringForValue(*vm, m_codeBlock->globalObjectFor(node->origin.semantic), child);
- setConstant(node, *m_graph.freeze(typeString));
+ JSValue typeString = jsTypeStringForValue(*vm, m_codeBlock->globalObjectFor(node->codeOrigin), child);
+ setConstant(node, typeString);
break;
}
if (isFullNumberSpeculation(abstractChild.m_type)) {
- setConstant(node, *m_graph.freeze(vm->smallStrings.numberString()));
+ setConstant(node, vm->smallStrings.numberString());
break;
}
if (isStringSpeculation(abstractChild.m_type)) {
- setConstant(node, *m_graph.freeze(vm->smallStrings.stringString()));
+ setConstant(node, vm->smallStrings.stringString());
break;
}
-
- // FIXME: We could use the masquerades-as-undefined watchpoint here.
- // https://bugs.webkit.org/show_bug.cgi?id=144456
- if (!(abstractChild.m_type & ~(SpecObject - SpecObjectOther))) {
- setConstant(node, *m_graph.freeze(vm->smallStrings.objectString()));
+
+ if (isFinalObjectSpeculation(abstractChild.m_type) || isArraySpeculation(abstractChild.m_type) || isArgumentsSpeculation(abstractChild.m_type)) {
+ setConstant(node, vm->smallStrings.objectString());
break;
}
if (isFunctionSpeculation(abstractChild.m_type)) {
- setConstant(node, *m_graph.freeze(vm->smallStrings.functionString()));
+ setConstant(node, vm->smallStrings.functionString());
break;
}
if (isBooleanSpeculation(abstractChild.m_type)) {
- setConstant(node, *m_graph.freeze(vm->smallStrings.booleanString()));
+ setConstant(node, vm->smallStrings.booleanString());
break;
}
- if (isSymbolSpeculation(abstractChild.m_type)) {
- setConstant(node, *m_graph.freeze(vm->smallStrings.symbolString()));
+ switch (node->child1().useKind()) {
+ case StringUse:
+ case CellUse:
+ node->setCanExit(true);
+ break;
+ case UntypedUse:
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
-
- forNode(node).setType(m_graph, SpecStringIdent);
+ forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get());
break;
}
@@ -1183,127 +920,77 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
if (node->op() == CompareEqConstant || node->op() == CompareEq) {
SpeculatedType leftType = forNode(node->child1()).m_type;
SpeculatedType rightType = forNode(node->child2()).m_type;
- if (!valuesCouldBeEqual(leftType, rightType)) {
+ if ((isInt32Speculation(leftType) && isOtherSpeculation(rightType))
+ || (isOtherSpeculation(leftType) && isInt32Speculation(rightType))) {
setConstant(node, jsBoolean(false));
break;
}
}
- if (node->child1() == node->child2()) {
- if (node->isBinaryUseKind(Int32Use) ||
- node->isBinaryUseKind(Int52RepUse) ||
- node->isBinaryUseKind(StringUse) ||
- node->isBinaryUseKind(BooleanUse) ||
- node->isBinaryUseKind(StringIdentUse) ||
- node->isBinaryUseKind(ObjectUse) ||
- node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse) ||
- node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
- switch (node->op()) {
- case CompareLess:
- case CompareGreater:
- setConstant(node, jsBoolean(false));
- break;
- case CompareLessEq:
- case CompareGreaterEq:
- case CompareEq:
- case CompareEqConstant:
- setConstant(node, jsBoolean(true));
- break;
- default:
- DFG_CRASH(m_graph, node, "Unexpected node type");
- break;
- }
- break;
- }
- }
-
forNode(node).setType(SpecBoolean);
+
+ // This is overly conservative. But the only thing this prevents is store elimination,
+ // and how likely is it, really, that you'll have redundant stores across a comparison
+ // operation? Comparison operations are typically at the end of basic blocks, so
+ // unless we have global store elimination (super unlikely given how unprofitable that
+ // optimization is to begin with), you aren't going to be wanting to store eliminate
+ // across an equality op.
+ node->setCanExit(true);
break;
}
- case CompareStrictEq: {
+ case CompareStrictEq:
+ case CompareStrictEqConstant: {
Node* leftNode = node->child1().node();
Node* rightNode = node->child2().node();
JSValue left = forNode(leftNode).value();
JSValue right = forNode(rightNode).value();
if (left && right) {
+ if (left.isNumber() && right.isNumber()) {
+ setConstant(node, jsBoolean(left.asNumber() == right.asNumber()));
+ break;
+ }
if (left.isString() && right.isString()) {
- // We need this case because JSValue::strictEqual is otherwise too racy for
- // string comparisons.
const StringImpl* a = asString(left)->tryGetValueImpl();
const StringImpl* b = asString(right)->tryGetValueImpl();
if (a && b) {
setConstant(node, jsBoolean(WTF::equal(a, b)));
break;
}
- } else {
- setConstant(node, jsBoolean(JSValue::strictEqual(0, left, right)));
- break;
- }
- }
-
- SpeculatedType leftLUB = leastUpperBoundOfStrictlyEquivalentSpeculations(forNode(leftNode).m_type);
- SpeculatedType rightLUB = leastUpperBoundOfStrictlyEquivalentSpeculations(forNode(rightNode).m_type);
- if (!(leftLUB & rightLUB)) {
- setConstant(node, jsBoolean(false));
- break;
- }
-
- if (node->child1() == node->child2()) {
- if (node->isBinaryUseKind(BooleanUse) ||
- node->isBinaryUseKind(Int32Use) ||
- node->isBinaryUseKind(Int52RepUse) ||
- node->isBinaryUseKind(StringUse) ||
- node->isBinaryUseKind(StringIdentUse) ||
- node->isBinaryUseKind(ObjectUse) ||
- node->isBinaryUseKind(MiscUse, UntypedUse) ||
- node->isBinaryUseKind(UntypedUse, MiscUse) ||
- node->isBinaryUseKind(StringIdentUse, NotStringVarUse) ||
- node->isBinaryUseKind(NotStringVarUse, StringIdentUse) ||
- node->isBinaryUseKind(StringUse, UntypedUse) ||
- node->isBinaryUseKind(UntypedUse, StringUse)) {
- setConstant(node, jsBoolean(true));
- break;
}
}
-
forNode(node).setType(SpecBoolean);
+ node->setCanExit(true); // This is overly conservative.
break;
}
case StringCharCodeAt:
+ node->setCanExit(true);
forNode(node).setType(SpecInt32);
break;
case StringFromCharCode:
- forNode(node).setType(m_graph, SpecString);
+ forNode(node).setType(SpecString);
break;
case StringCharAt:
+ node->setCanExit(true);
forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get());
break;
case GetByVal: {
+ node->setCanExit(true);
switch (node->arrayMode().type()) {
case Array::SelectUsingPredictions:
case Array::Unprofiled:
- case Array::SelectUsingArguments:
+ case Array::Undecided:
RELEASE_ASSERT_NOT_REACHED();
break;
case Array::ForceExit:
m_state.setIsValid(false);
break;
- case Array::Undecided: {
- JSValue index = forNode(node->child2()).value();
- if (index && index.isInt32() && index.asInt32() >= 0) {
- setConstant(node, jsUndefined());
- break;
- }
- forNode(node).setType(SpecOther);
- break;
- }
case Array::Generic:
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).makeHeapTop();
break;
case Array::String:
@@ -1318,28 +1005,27 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
// implies an in-bounds access). None of this feels like it's worth it,
// so we're going with TOP for now. The same thing applies to
// clobbering the world.
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).makeHeapTop();
} else
forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get());
break;
- case Array::DirectArguments:
- case Array::ScopedArguments:
+ case Array::Arguments:
forNode(node).makeHeapTop();
break;
case Array::Int32:
if (node->arrayMode().isOutOfBounds()) {
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).makeHeapTop();
} else
forNode(node).setType(SpecInt32);
break;
case Array::Double:
if (node->arrayMode().isOutOfBounds()) {
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).makeHeapTop();
} else if (node->arrayMode().isSaneChain())
- forNode(node).setType(SpecBytecodeDouble);
+ forNode(node).setType(SpecDouble);
else
forNode(node).setType(SpecDoubleReal);
break;
@@ -1347,7 +1033,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
if (node->arrayMode().isOutOfBounds())
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).makeHeapTop();
break;
case Array::Int8Array:
@@ -1374,13 +1060,13 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
else if (enableInt52() && node->shouldSpeculateMachineInt())
forNode(node).setType(SpecInt52);
else
- forNode(node).setType(SpecInt52AsDouble);
+ forNode(node).setType(SpecDouble);
break;
case Array::Float32Array:
- forNode(node).setType(SpecFullDouble);
+ forNode(node).setType(SpecDouble);
break;
case Array::Float64Array:
- forNode(node).setType(SpecFullDouble);
+ forNode(node).setType(SpecDouble);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -1392,29 +1078,30 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case PutByValDirect:
case PutByVal:
case PutByValAlias: {
+ node->setCanExit(true);
switch (node->arrayMode().modeForPut().type()) {
case Array::ForceExit:
m_state.setIsValid(false);
break;
case Array::Generic:
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
break;
case Array::Int32:
if (node->arrayMode().isOutOfBounds())
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
break;
case Array::Double:
if (node->arrayMode().isOutOfBounds())
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
break;
case Array::Contiguous:
case Array::ArrayStorage:
if (node->arrayMode().isOutOfBounds())
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
break;
case Array::SlowPutArrayStorage:
if (node->arrayMode().mayStoreToHole())
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
break;
default:
break;
@@ -1423,59 +1110,16 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
case ArrayPush:
- clobberWorld(node->origin.semantic, clobberLimit);
+ node->setCanExit(true);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).setType(SpecBytecodeNumber);
break;
case ArrayPop:
- clobberWorld(node->origin.semantic, clobberLimit);
- forNode(node).makeHeapTop();
- break;
-
- case GetMyArgumentByVal: {
- JSValue index = forNode(node->child2()).m_value;
- InlineCallFrame* inlineCallFrame = node->child1()->origin.semantic.inlineCallFrame;
-
- if (index && index.isInt32()) {
- // This pretends to return TOP for accesses that are actually proven out-of-bounds because
- // that's the conservative thing to do. Otherwise we'd need to write more code to mark such
- // paths as unreachable, and it's almost certainly not worth the effort.
-
- if (inlineCallFrame) {
- if (index.asUInt32() < inlineCallFrame->arguments.size() - 1) {
- forNode(node) = m_state.variables().operand(
- virtualRegisterForArgument(index.asInt32() + 1) + inlineCallFrame->stackOffset);
- m_state.setFoundConstants(true);
- break;
- }
- } else {
- if (index.asUInt32() < m_state.variables().numberOfArguments() - 1) {
- forNode(node) = m_state.variables().argument(index.asInt32() + 1);
- m_state.setFoundConstants(true);
- break;
- }
- }
- }
-
- if (inlineCallFrame) {
- // We have a bound on the types even though it's random access. Take advantage of this.
-
- AbstractValue result;
- for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;) {
- result.merge(
- m_state.variables().operand(
- virtualRegisterForArgument(i) + inlineCallFrame->stackOffset));
- }
-
- if (result.value())
- m_state.setFoundConstants(true);
- forNode(node) = result;
- break;
- }
-
+ node->setCanExit(true);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).makeHeapTop();
break;
- }
case RegExpExec:
forNode(node).makeHeapTop();
@@ -1503,6 +1147,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
// constant propagation, but we can do better:
// We can specialize the source variable's value on each direction of
// the branch.
+ node->setCanExit(true); // This is overly conservative.
m_state.setBranchDirection(TakeBoth);
break;
}
@@ -1520,6 +1165,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case Throw:
case ThrowReferenceError:
m_state.setIsValid(false);
+ node->setCanExit(true);
break;
case ToPrimitive: {
@@ -1531,38 +1177,58 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
ASSERT(node->child1().useKind() == UntypedUse);
- if (!forNode(node->child1()).m_type) {
- m_state.setIsValid(false);
- break;
- }
-
- if (!(forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString | SpecSymbol))) {
- m_state.setFoundConstants(true);
- forNode(node) = forNode(node->child1());
- break;
- }
-
- clobberWorld(node->origin.semantic, clobberLimit);
+ AbstractValue& source = forNode(node->child1());
+ AbstractValue& destination = forNode(node);
- forNode(node).setType(m_graph, (SpecHeapTop & ~SpecCell) | SpecString | SpecSymbol);
+ // NB. The more canonical way of writing this would have been:
+ //
+ // destination = source;
+ // if (destination.m_type & !(SpecFullNumber | SpecString | SpecBoolean)) {
+ // destination.filter(SpecFullNumber | SpecString | SpecBoolean);
+ // AbstractValue string;
+ // string.set(vm->stringStructure);
+ // destination.merge(string);
+ // }
+ //
+ // The reason why this would, in most other cases, have been better is that
+ // then destination would preserve any non-SpeculatedType knowledge of source.
+ // As it stands, the code below forgets any non-SpeculatedType knowledge that
+ // source would have had. Fortunately, though, for things like strings and
+ // numbers and booleans, we don't care about the non-SpeculatedType knowedge:
+ // the structure won't tell us anything we don't already know, and neither
+ // will ArrayModes. And if the source was a meaningful constant then we
+ // would have handled that above. Unfortunately, this does mean that
+ // ToPrimitive will currently forget string constants. But that's not a big
+ // deal since we don't do any optimization on those currently.
+
+ clobberWorld(node->codeOrigin, clobberLimit);
+
+ SpeculatedType type = source.m_type;
+ if (type & ~(SpecFullNumber | SpecString | SpecBoolean))
+ type = (SpecHeapTop & ~SpecCell) | SpecString;
+
+ destination.setType(type);
+ if (destination.isClear())
+ m_state.setIsValid(false);
break;
}
- case ToString:
- case CallStringConstructor: {
+ case ToString: {
switch (node->child1().useKind()) {
case StringObjectUse:
// This also filters that the StringObject has the primordial StringObject
// structure.
filter(
node->child1(),
- m_graph.globalObjectFor(node->origin.semantic)->stringObjectStructure());
+ m_graph.globalObjectFor(node->codeOrigin)->stringObjectStructure());
+ node->setCanExit(true); // We could be more precise but it's likely not worth it.
break;
case StringOrStringObjectUse:
+ node->setCanExit(true); // We could be more precise but it's likely not worth it.
break;
case CellUse:
case UntypedUse:
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -1579,19 +1245,25 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
case NewArray:
+ node->setCanExit(true);
forNode(node).set(
m_graph,
- m_graph.globalObjectFor(node->origin.semantic)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
+ m_graph.globalObjectFor(node->codeOrigin)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
+ m_state.setHaveStructures(true);
break;
case NewArrayBuffer:
+ node->setCanExit(true);
forNode(node).set(
m_graph,
- m_graph.globalObjectFor(node->origin.semantic)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
+ m_graph.globalObjectFor(node->codeOrigin)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
+ m_state.setHaveStructures(true);
break;
case NewArrayWithSize:
- forNode(node).setType(m_graph, SpecArray);
+ node->setCanExit(true);
+ forNode(node).setType(SpecArray);
+ m_state.setHaveStructures(true);
break;
case NewTypedArray:
@@ -1599,7 +1271,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case Int32Use:
break;
case UntypedUse:
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -1607,19 +1279,21 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
forNode(node).set(
m_graph,
- m_graph.globalObjectFor(node->origin.semantic)->typedArrayStructure(
+ m_graph.globalObjectFor(node->codeOrigin)->typedArrayStructure(
node->typedArrayType()));
+ m_state.setHaveStructures(true);
break;
case NewRegexp:
- forNode(node).set(m_graph, m_graph.globalObjectFor(node->origin.semantic)->regExpStructure());
+ forNode(node).set(m_graph, m_graph.globalObjectFor(node->codeOrigin)->regExpStructure());
+ m_state.setHaveStructures(true);
break;
case ToThis: {
AbstractValue& source = forNode(node->child1());
AbstractValue& destination = forNode(node);
- if (m_graph.executableFor(node->origin.semantic)->isStrictMode())
+ if (m_graph.executableFor(node->codeOrigin)->isStrictMode())
destination.makeHeapTop();
else {
destination = source;
@@ -1629,277 +1303,235 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
}
case CreateThis: {
- // FIXME: We can fold this to NewObject if the incoming callee is a constant.
- forNode(node).setType(m_graph, SpecFinalObject);
+ forNode(node).setType(SpecFinalObject);
break;
}
+ case AllocationProfileWatchpoint:
+ node->setCanExit(true);
+ break;
+
case NewObject:
ASSERT(node->structure());
forNode(node).set(m_graph, node->structure());
+ m_state.setHaveStructures(true);
break;
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- case BottomValue:
- m_state.setDidClobber(true); // Prevent constant folding.
- // This claims to return bottom.
- break;
-
- case PutHint:
- break;
-
- case MaterializeNewObject: {
- StructureSet set;
-
- m_phiChildren->forAllTransitiveIncomingValues(
- m_graph.varArgChild(node, 0).node(),
- [&] (Node* incoming) {
- set.add(incoming->castConstant<Structure*>());
- });
-
- forNode(node).set(m_graph, set);
- break;
- }
-
case CreateActivation:
- case MaterializeCreateActivation:
forNode(node).set(
- m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->activationStructure());
+ m_graph, m_codeBlock->globalObjectFor(node->codeOrigin)->activationStructure());
+ m_state.setHaveStructures(true);
break;
- case CreateDirectArguments:
- forNode(node).set(m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->directArgumentsStructure());
+ case FunctionReentryWatchpoint:
+ case TypedArrayWatchpoint:
break;
-
- case CreateScopedArguments:
- forNode(node).set(m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->scopedArgumentsStructure());
+
+ case CreateArguments:
+ forNode(node) = forNode(node->child1());
+ forNode(node).filter(~SpecEmpty);
+ forNode(node).merge(SpecArguments);
break;
- case CreateClonedArguments:
- forNode(node).setType(m_graph, SpecObjectOther);
+ case TearOffActivation:
+ case TearOffArguments:
+ // Does nothing that is user-visible.
break;
-
- case NewFunction:
- forNode(node).set(
- m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->functionStructure());
+
+ case CheckArgumentsNotCreated:
+ if (isEmptySpeculation(
+ m_state.variables().operand(
+ m_graph.argumentsRegisterFor(node->codeOrigin).offset()).m_type))
+ m_state.setFoundConstants(true);
+ else
+ node->setCanExit(true);
+ break;
+
+ case GetMyArgumentsLength:
+ // We know that this executable does not escape its arguments, so we can optimize
+ // the arguments a bit. Note that this is not sufficient to force constant folding
+ // of GetMyArgumentsLength, because GetMyArgumentsLength is a clobbering operation.
+ // We perform further optimizations on this later on.
+ if (node->codeOrigin.inlineCallFrame) {
+ forNode(node).set(
+ m_graph, jsNumber(node->codeOrigin.inlineCallFrame->arguments.size() - 1));
+ } else
+ forNode(node).setType(SpecInt32);
+ node->setCanExit(
+ !isEmptySpeculation(
+ m_state.variables().operand(
+ m_graph.argumentsRegisterFor(node->codeOrigin)).m_type));
+ break;
+
+ case GetMyArgumentsLengthSafe:
+ // This potentially clobbers all structures if the arguments object had a getter
+ // installed on the length property.
+ clobberWorld(node->codeOrigin, clobberLimit);
+ // We currently make no guarantee about what this returns because it does not
+ // speculate that the length property is actually a length.
+ forNode(node).makeHeapTop();
break;
- case GetCallee:
- if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) {
- InferredValue* singleton = executable->singletonFunction();
- if (JSValue value = singleton->inferredValue()) {
- m_graph.watchpoints().addLazily(singleton);
- JSFunction* function = jsCast<JSFunction*>(value);
- setConstant(node, *m_graph.freeze(function));
- break;
- }
- }
- forNode(node).setType(m_graph, SpecFunction);
+ case GetMyArgumentByVal:
+ node->setCanExit(true);
+ // We know that this executable does not escape its arguments, so we can optimize
+ // the arguments a bit. Note that this ends up being further optimized by the
+ // ArgumentsSimplificationPhase.
+ forNode(node).makeHeapTop();
break;
- case GetArgumentCount:
- forNode(node).setType(SpecInt32);
+ case GetMyArgumentByValSafe:
+ node->setCanExit(true);
+ // This potentially clobbers all structures if the property we're accessing has
+ // a getter. We don't speculate against this.
+ clobberWorld(node->codeOrigin, clobberLimit);
+ // And the result is unknown.
+ forNode(node).makeHeapTop();
break;
- case GetGetter: {
- JSValue base = forNode(node->child1()).m_value;
- if (base) {
- GetterSetter* getterSetter = jsCast<GetterSetter*>(base);
- if (!getterSetter->isGetterNull()) {
- setConstant(node, *m_graph.freeze(getterSetter->getterConcurrently()));
- break;
- }
- }
+ case NewFunction: {
+ AbstractValue& value = forNode(node);
+ value = forNode(node->child1());
- forNode(node).setType(m_graph, SpecObject);
+ if (!(value.m_type & SpecEmpty)) {
+ m_state.setFoundConstants(true);
+ break;
+ }
+
+ value.setType((value.m_type & ~SpecEmpty) | SpecFunction);
break;
}
+
+ case NewFunctionExpression:
+ case NewFunctionNoCheck:
+ forNode(node).set(
+ m_graph, m_codeBlock->globalObjectFor(node->codeOrigin)->functionStructure());
+ break;
- case GetSetter: {
- JSValue base = forNode(node->child1()).m_value;
- if (base) {
- GetterSetter* getterSetter = jsCast<GetterSetter*>(base);
- if (!getterSetter->isSetterNull()) {
- setConstant(node, *m_graph.freeze(getterSetter->setterConcurrently()));
- break;
- }
- }
-
- forNode(node).setType(m_graph, SpecObject);
+ case GetCallee:
+ forNode(node).setType(SpecFunction);
break;
- }
- case GetScope:
- if (JSValue base = forNode(node->child1()).m_value) {
- if (JSFunction* function = jsDynamicCast<JSFunction*>(base)) {
- setConstant(node, *m_graph.freeze(function->scope()));
- break;
- }
- }
- forNode(node).setType(m_graph, SpecObjectOther);
+ case GetScope: // FIXME: We could get rid of these if we know that the JSFunction is a constant. https://bugs.webkit.org/show_bug.cgi?id=106202
+ case GetMyScope:
+ case SkipTopScope:
+ forNode(node).setType(SpecObjectOther);
break;
case SkipScope: {
JSValue child = forNode(node->child1()).value();
if (child) {
- setConstant(node, *m_graph.freeze(JSValue(jsCast<JSScope*>(child.asCell())->next())));
+ setConstant(node, JSValue(jsCast<JSScope*>(child.asCell())->next()));
break;
}
- forNode(node).setType(m_graph, SpecObjectOther);
+ forNode(node).setType(SpecObjectOther);
break;
}
+ case GetClosureRegisters:
+ forNode(node).clear(); // The result is not a JS value.
+ break;
+
case GetClosureVar:
- if (JSValue value = m_graph.tryGetConstantClosureVar(forNode(node->child1()), node->scopeOffset())) {
- setConstant(node, *m_graph.freeze(value));
- break;
- }
forNode(node).makeHeapTop();
break;
case PutClosureVar:
- break;
-
- case GetFromArguments:
- forNode(node).makeHeapTop();
- break;
-
- case PutToArguments:
+ clobberCapturedVars(node->codeOrigin);
break;
case GetById:
- case GetByIdFlush: {
+ case GetByIdFlush:
+ node->setCanExit(true);
if (!node->prediction()) {
m_state.setIsValid(false);
break;
}
-
- AbstractValue& value = forNode(node->child1());
- if (!value.m_structure.isTop() && !value.m_structure.isClobbered()
- && (node->child1().useKind() == CellUse || !(value.m_type & ~SpecCell))) {
- GetByIdStatus status = GetByIdStatus::computeFor(
- value.m_structure.set(), m_graph.identifiers()[node->identifierNumber()]);
- if (status.isSimple()) {
- // Figure out what the result is going to be - is it TOP, a constant, or maybe
- // something more subtle?
- AbstractValue result;
- for (unsigned i = status.numVariants(); i--;) {
- // This thing won't give us a variant that involves prototypes. If it did, we'd
- // have more work to do here.
- DFG_ASSERT(m_graph, node, status[i].conditionSet().isEmpty());
+ if (isCellSpeculation(node->child1()->prediction())) {
+ if (Structure* structure = forNode(node->child1()).bestProvenStructure()) {
+ GetByIdStatus status = GetByIdStatus::computeFor(
+ m_graph.m_vm, structure,
+ m_graph.identifiers()[node->identifierNumber()]);
+ if (status.isSimple()) {
+ // Assert things that we can't handle and that the computeFor() method
+ // above won't be able to return.
+ ASSERT(status.structureSet().size() == 1);
+ ASSERT(!status.chain());
- JSValue constantResult =
- m_graph.tryGetConstantProperty(value, status[i].offset());
- if (!constantResult) {
- result.makeHeapTop();
- break;
- }
+ if (status.specificValue())
+ setConstant(node, status.specificValue());
+ else
+ forNode(node).makeHeapTop();
+ filter(node->child1(), status.structureSet());
- AbstractValue thisResult;
- thisResult.set(
- m_graph, *m_graph.freeze(constantResult),
- m_state.structureClobberState());
- result.merge(thisResult);
- }
- if (status.numVariants() == 1 || isFTL(m_graph.m_plan.mode))
m_state.setFoundConstants(true);
- forNode(node) = result;
- break;
+ m_state.setHaveStructures(true);
+ break;
+ }
}
}
-
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).makeHeapTop();
break;
- }
- case GetArrayLength: {
- JSArrayBufferView* view = m_graph.tryGetFoldableView(
- forNode(node->child1()).m_value, node->arrayMode());
- if (view) {
- setConstant(node, jsNumber(view->length()));
- break;
- }
+ case GetArrayLength:
+ node->setCanExit(true); // Lies, but it's true for the common case of JSArray, so it's good enough.
forNode(node).setType(SpecInt32);
break;
- }
+ case CheckExecutable: {
+ // FIXME: We could track executables in AbstractValue, which would allow us to get rid of these checks
+ // more thoroughly. https://bugs.webkit.org/show_bug.cgi?id=106200
+ // FIXME: We could eliminate these entirely if we know the exact value that flows into this.
+ // https://bugs.webkit.org/show_bug.cgi?id=106201
+ node->setCanExit(true);
+ break;
+ }
+
case CheckStructure: {
+ // FIXME: We should be able to propagate the structure sets of constants (i.e. prototypes).
AbstractValue& value = forNode(node->child1());
ASSERT(!(value.m_type & ~SpecCell)); // Edge filtering should have already ensured this.
StructureSet& set = node->structureSet();
-
- // It's interesting that we could have proven that the object has a larger structure set
- // that includes the set we're testing. In that case we could make the structure check
- // more efficient. We currently don't.
-
- if (value.m_structure.isSubsetOf(set)) {
+
+ if (value.m_currentKnownStructure.isSubsetOf(set)) {
m_state.setFoundConstants(true);
break;
}
+ node->setCanExit(true);
+ m_state.setHaveStructures(true);
+
+ // If this structure check is attempting to prove knowledge already held in
+ // the futurePossibleStructure set then the constant folding phase should
+ // turn this into a watchpoint instead.
+ if (value.m_futurePossibleStructure.isSubsetOf(set)
+ && value.m_futurePossibleStructure.hasSingleton()) {
+ m_state.setFoundConstants(true);
+ filter(value, value.m_futurePossibleStructure.singleton());
+ break;
+ }
+
filter(value, set);
break;
}
- case CheckStructureImmediate: {
- // FIXME: This currently can only reason about one structure at a time.
- // https://bugs.webkit.org/show_bug.cgi?id=136988
-
+ case StructureTransitionWatchpoint: {
AbstractValue& value = forNode(node->child1());
- StructureSet& set = node->structureSet();
-
- if (value.value()) {
- if (Structure* structure = jsDynamicCast<Structure*>(value.value())) {
- if (set.contains(structure)) {
- m_state.setFoundConstants(true);
- break;
- }
- }
- m_state.setIsValid(false);
- break;
- }
-
- if (m_phiChildren) {
- bool allGood = true;
- m_phiChildren->forAllTransitiveIncomingValues(
- node,
- [&] (Node* incoming) {
- if (Structure* structure = incoming->dynamicCastConstant<Structure*>()) {
- if (set.contains(structure))
- return;
- }
- allGood = false;
- });
- if (allGood) {
- m_state.setFoundConstants(true);
- break;
- }
- }
-
- if (Structure* structure = set.onlyStructure()) {
- filterByValue(node->child1(), *m_graph.freeze(structure));
- break;
- }
-
- // Aw shucks, we can't do anything!
+
+ filter(value, node->structure());
+ m_state.setHaveStructures(true);
+ node->setCanExit(true);
break;
}
-
+
case PutStructure:
- if (!forNode(node->child1()).m_structure.isClear()) {
- if (forNode(node->child1()).m_structure.onlyStructure() == node->transition()->next)
- m_state.setFoundConstants(true);
- else {
- observeTransition(
- clobberLimit, node->transition()->previous, node->transition()->next);
- forNode(node->child1()).changeStructure(m_graph, node->transition()->next);
- }
+ case PhantomPutStructure:
+ if (!forNode(node->child1()).m_currentKnownStructure.isClear()) {
+ clobberStructures(clobberLimit);
+ forNode(node->child1()).set(m_graph, node->structureTransitionData().newStructure);
+ m_state.setHaveStructures(true);
}
break;
case GetButterfly:
@@ -1912,6 +1544,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
m_state.setFoundConstants(true);
break;
}
+ node->setCanExit(true); // Lies, but this is followed by operations (like GetByVal) that always exit, so there is no point in us trying to be clever here.
switch (node->arrayMode().type()) {
case Array::String:
filter(node->child1(), SpecString);
@@ -1919,15 +1552,11 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case Array::Int32:
case Array::Double:
case Array::Contiguous:
- case Array::Undecided:
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
break;
- case Array::DirectArguments:
- filter(node->child1(), SpecDirectArguments);
- break;
- case Array::ScopedArguments:
- filter(node->child1(), SpecScopedArguments);
+ case Array::Arguments:
+ filter(node->child1(), SpecArguments);
break;
case Array::Int8Array:
filter(node->child1(), SpecInt8Array);
@@ -1961,6 +1590,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
filterArrayModes(node->child1(), node->arrayMode().arrayModesThatPassFiltering());
+ m_state.setHaveStructures(true);
break;
}
case Arrayify: {
@@ -1968,237 +1598,59 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
m_state.setFoundConstants(true);
break;
}
- ASSERT(node->arrayMode().conversion() == Array::Convert);
+ ASSERT(node->arrayMode().conversion() == Array::Convert
+ || node->arrayMode().conversion() == Array::RageConvert);
+ node->setCanExit(true);
clobberStructures(clobberLimit);
filterArrayModes(node->child1(), node->arrayMode().arrayModesThatPassFiltering());
+ m_state.setHaveStructures(true);
break;
}
case ArrayifyToStructure: {
AbstractValue& value = forNode(node->child1());
- if (value.m_structure.isSubsetOf(StructureSet(node->structure())))
+ StructureSet set = node->structure();
+ if (value.m_futurePossibleStructure.isSubsetOf(set)
+ || value.m_currentKnownStructure.isSubsetOf(set))
m_state.setFoundConstants(true);
+ node->setCanExit(true);
clobberStructures(clobberLimit);
-
- // We have a bunch of options of how to express the abstract set at this point. Let set S
- // be the set of structures that the value had before clobbering and assume that all of
- // them are watchable. The new value should be the least expressible upper bound of the
- // intersection of "values that currently have structure = node->structure()" and "values
- // that have structure in S plus any structure transition-reachable from S". Assume that
- // node->structure() is not in S but it is transition-reachable from S. Then we would
- // like to say that the result is "values that have structure = node->structure() until
- // we invalidate", but there is no way to express this using the AbstractValue syntax. So
- // we must choose between:
- //
- // 1) "values that currently have structure = node->structure()". This is a valid
- // superset of the value that we really want, and it's specific enough to satisfy the
- // preconditions of the array access that this is guarding. It's also specific enough
- // to allow relevant optimizations in the case that we didn't have a contradiction
- // like in this example. Notice that in the abscence of any contradiction, this result
- // is precise rather than being a conservative LUB.
- //
- // 2) "values that currently hava structure in S plus any structure transition-reachable
- // from S". This is also a valid superset of the value that we really want, but it's
- // not specific enough to satisfy the preconditions of the array access that this is
- // guarding - so playing such shenanigans would preclude us from having assertions on
- // the typing preconditions of any array accesses. This would also not be a desirable
- // answer in the absence of a contradiction.
- //
- // Note that it's tempting to simply say that the resulting value is BOTTOM because of
- // the contradiction. That would be wrong, since we haven't hit an invalidation point,
- // yet.
- value.set(m_graph, node->structure());
- break;
- }
- case GetIndexedPropertyStorage: {
- JSArrayBufferView* view = m_graph.tryGetFoldableView(
- forNode(node->child1()).m_value, node->arrayMode());
- if (view)
- m_state.setFoundConstants(true);
- forNode(node).clear();
+ filter(value, set);
+ m_state.setHaveStructures(true);
break;
}
+ case GetIndexedPropertyStorage:
case ConstantStoragePointer: {
forNode(node).clear();
break;
}
case GetTypedArrayByteOffset: {
- JSArrayBufferView* view = m_graph.tryGetFoldableView(forNode(node->child1()).m_value);
- if (view) {
- setConstant(node, jsNumber(view->byteOffset()));
- break;
- }
forNode(node).setType(SpecInt32);
break;
}
case GetByOffset: {
- StorageAccessData& data = node->storageAccessData();
- JSValue result = m_graph.tryGetConstantProperty(forNode(node->child2()), data.offset);
- if (result) {
- setConstant(node, *m_graph.freeze(result));
- break;
- }
-
forNode(node).makeHeapTop();
break;
}
-
- case GetGetterSetterByOffset: {
- StorageAccessData& data = node->storageAccessData();
- JSValue result = m_graph.tryGetConstantProperty(forNode(node->child2()), data.offset);
- if (result && jsDynamicCast<GetterSetter*>(result)) {
- setConstant(node, *m_graph.freeze(result));
- break;
- }
-
- forNode(node).set(m_graph, m_graph.m_vm.getterSetterStructure.get());
- break;
- }
-
- case MultiGetByOffset: {
- // This code will filter the base value in a manner that is possibly different (either more
- // or less precise) than the way it would be filtered if this was strength-reduced to a
- // CheckStructure. This is fine. It's legal for different passes over the code to prove
- // different things about the code, so long as all of them are sound. That even includes
- // one guy proving that code should never execute (due to a contradiction) and another guy
- // not finding that contradiction. If someone ever proved that there would be a
- // contradiction then there must always be a contradiction even if subsequent passes don't
- // realize it. This is the case here.
-
- // Ordinarily you have to be careful with calling setFoundConstants()
- // because of the effect on compile times, but this node is FTL-only.
- m_state.setFoundConstants(true);
-
- AbstractValue base = forNode(node->child1());
- StructureSet baseSet;
- AbstractValue result;
- for (const MultiGetByOffsetCase& getCase : node->multiGetByOffsetData().cases) {
- StructureSet set = getCase.set();
- set.filter(base);
- if (set.isEmpty())
- continue;
- baseSet.merge(set);
-
- if (getCase.method().kind() != GetByOffsetMethod::Constant) {
- result.makeHeapTop();
- continue;
- }
-
- AbstractValue thisResult;
- thisResult.set(
- m_graph,
- *getCase.method().constant(),
- m_state.structureClobberState());
- result.merge(thisResult);
- }
-
- if (forNode(node->child1()).changeStructure(m_graph, baseSet) == Contradiction)
- m_state.setIsValid(false);
-
- forNode(node) = result;
- break;
- }
case PutByOffset: {
break;
}
-
- case MultiPutByOffset: {
- StructureSet newSet;
- TransitionVector transitions;
-
- // Ordinarily you have to be careful with calling setFoundConstants()
- // because of the effect on compile times, but this node is FTL-only.
- m_state.setFoundConstants(true);
-
- AbstractValue base = forNode(node->child1());
-
- for (unsigned i = node->multiPutByOffsetData().variants.size(); i--;) {
- const PutByIdVariant& variant = node->multiPutByOffsetData().variants[i];
- StructureSet thisSet = variant.oldStructure();
- thisSet.filter(base);
- if (thisSet.isEmpty())
- continue;
- if (variant.kind() == PutByIdVariant::Transition) {
- if (thisSet.onlyStructure() != variant.newStructure()) {
- transitions.append(
- Transition(variant.oldStructureForTransition(), variant.newStructure()));
- } // else this is really a replace.
- newSet.add(variant.newStructure());
- } else {
- ASSERT(variant.kind() == PutByIdVariant::Replace);
- newSet.merge(thisSet);
- }
- }
-
- observeTransitions(clobberLimit, transitions);
- if (forNode(node->child1()).changeStructure(m_graph, newSet) == Contradiction)
- m_state.setIsValid(false);
- break;
- }
-
- case GetExecutable: {
- JSValue value = forNode(node->child1()).value();
- if (value) {
- JSFunction* function = jsDynamicCast<JSFunction*>(value);
- if (function) {
- setConstant(node, *m_graph.freeze(function->executable()));
- break;
- }
- }
- forNode(node).setType(m_graph, SpecCellOther);
- break;
- }
-
- case CheckCell: {
+
+ case CheckFunction: {
JSValue value = forNode(node->child1()).value();
- if (value == node->cellOperand()->value()) {
+ if (value == node->function()) {
m_state.setFoundConstants(true);
ASSERT(value);
break;
}
- filterByValue(node->child1(), *node->cellOperand());
- break;
- }
-
- case CheckNotEmpty: {
- AbstractValue& value = forNode(node->child1());
- if (!(value.m_type & SpecEmpty)) {
- m_state.setFoundConstants(true);
- break;
- }
- filter(value, ~SpecEmpty);
- break;
- }
-
- case CheckIdent: {
- AbstractValue& value = forNode(node->child1());
- UniquedStringImpl* uid = node->uidOperand();
- ASSERT(uid->isSymbol() ? !(value.m_type & ~SpecSymbol) : !(value.m_type & ~SpecStringIdent)); // Edge filtering should have already ensured this.
-
- JSValue childConstant = value.value();
- if (childConstant) {
- if (uid->isSymbol()) {
- ASSERT(childConstant.isSymbol());
- if (asSymbol(childConstant)->privateName().uid() == uid) {
- m_state.setFoundConstants(true);
- break;
- }
- } else {
- ASSERT(childConstant.isString());
- if (asString(childConstant)->tryGetValueImpl() == uid) {
- m_state.setFoundConstants(true);
- break;
- }
- }
- }
-
- filter(value, uid->isSymbol() ? SpecSymbol : SpecStringIdent);
+ node->setCanExit(true); // Lies! We can do better.
+ filterByValue(node->child1(), node->function());
break;
}
-
+
case CheckInBounds: {
JSValue left = forNode(node->child1()).value();
JSValue right = forNode(node->child2()).value();
@@ -2207,140 +1659,79 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
m_state.setFoundConstants(true);
break;
}
+
+ node->setCanExit(true);
break;
}
case PutById:
- case PutByIdFlush:
- case PutByIdDirect: {
- AbstractValue& value = forNode(node->child1());
- if (!value.m_structure.isTop() && !value.m_structure.isClobbered()) {
+ case PutByIdDirect:
+ node->setCanExit(true);
+ if (Structure* structure = forNode(node->child1()).bestProvenStructure()) {
PutByIdStatus status = PutByIdStatus::computeFor(
- m_graph.globalObjectFor(node->origin.semantic),
- value.m_structure.set(),
+ m_graph.m_vm,
+ m_graph.globalObjectFor(node->codeOrigin),
+ structure,
m_graph.identifiers()[node->identifierNumber()],
node->op() == PutByIdDirect);
-
- if (status.isSimple()) {
- StructureSet newSet;
- TransitionVector transitions;
-
- for (unsigned i = status.numVariants(); i--;) {
- const PutByIdVariant& variant = status[i];
- if (variant.kind() == PutByIdVariant::Transition) {
- transitions.append(
- Transition(
- variant.oldStructureForTransition(), variant.newStructure()));
- m_graph.registerStructure(variant.newStructure());
- newSet.add(variant.newStructure());
- } else {
- ASSERT(variant.kind() == PutByIdVariant::Replace);
- newSet.merge(variant.oldStructure());
- }
- }
-
- if (status.numVariants() == 1 || isFTL(m_graph.m_plan.mode))
- m_state.setFoundConstants(true);
-
- observeTransitions(clobberLimit, transitions);
- if (forNode(node->child1()).changeStructure(m_graph, newSet) == Contradiction)
- m_state.setIsValid(false);
+ if (status.isSimpleReplace()) {
+ filter(node->child1(), structure);
+ m_state.setFoundConstants(true);
+ m_state.setHaveStructures(true);
+ break;
+ }
+ if (status.isSimpleTransition()) {
+ clobberStructures(clobberLimit);
+ forNode(node->child1()).set(m_graph, status.newStructure());
+ m_state.setHaveStructures(true);
+ m_state.setFoundConstants(true);
break;
}
}
-
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
break;
- }
- case In: {
+ case In:
// FIXME: We can determine when the property definitely exists based on abstract
// value information.
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).setType(SpecBoolean);
break;
- }
- case GetEnumerableLength: {
- forNode(node).setType(SpecInt32);
- break;
- }
- case HasGenericProperty: {
- forNode(node).setType(SpecBoolean);
- break;
- }
- case HasStructureProperty: {
- forNode(node).setType(SpecBoolean);
- break;
- }
- case HasIndexedProperty: {
- ArrayMode mode = node->arrayMode();
- switch (mode.type()) {
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous:
- case Array::ArrayStorage: {
- break;
- }
- default: {
- clobberWorld(node->origin.semantic, clobberLimit);
- break;
- }
- }
- forNode(node).setType(SpecBoolean);
- break;
- }
- case GetDirectPname: {
- clobberWorld(node->origin.semantic, clobberLimit);
- forNode(node).makeHeapTop();
- break;
- }
- case GetPropertyEnumerator: {
- forNode(node).setType(m_graph, SpecCell);
- break;
- }
- case GetEnumeratorStructurePname: {
- forNode(node).setType(m_graph, SpecString | SpecOther);
- break;
- }
- case GetEnumeratorGenericPname: {
- forNode(node).setType(m_graph, SpecString | SpecOther);
- break;
- }
- case ToIndexString: {
- forNode(node).setType(m_graph, SpecString);
- break;
- }
-
case GetGlobalVar:
forNode(node).makeHeapTop();
break;
+ case VariableWatchpoint:
case VarInjectionWatchpoint:
+ node->setCanExit(true);
+ break;
+
case PutGlobalVar:
case NotifyWrite:
break;
case CheckHasInstance:
+ node->setCanExit(true);
// Sadly, we don't propagate the fact that we've done CheckHasInstance
break;
case InstanceOf:
+ node->setCanExit(true);
// Again, sadly, we don't propagate the fact that we've done InstanceOf
forNode(node).setType(SpecBoolean);
break;
case Phi:
RELEASE_ASSERT(m_graph.m_form == SSA);
- // The state of this node would have already been decided, but it may have become a
- // constant, in which case we'd like to know.
- if (forNode(node).m_value)
- m_state.setFoundConstants(true);
+ // The state of this node would have already been decided.
break;
case Upsilon: {
m_state.createValueForNode(node->phi());
- forNode(node->phi()) = forNode(node->child1());
+ AbstractValue& value = forNode(node->child1());
+ forNode(node) = value;
+ forNode(node->phi()) = value;
break;
}
@@ -2350,49 +1741,38 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case Call:
case Construct:
- case CallVarargs:
- case CallForwardVarargs:
- case ConstructVarargs:
- case ConstructForwardVarargs:
- clobberWorld(node->origin.semantic, clobberLimit);
+ node->setCanExit(true);
+ clobberWorld(node->codeOrigin, clobberLimit);
forNode(node).makeHeapTop();
break;
case ForceOSRExit:
- case CheckBadCell:
+ node->setCanExit(true);
m_state.setIsValid(false);
break;
case InvalidationPoint:
- forAllValues(clobberLimit, AbstractValue::observeInvalidationPointFor);
- m_state.setStructureClobberState(StructuresAreWatched);
+ node->setCanExit(true);
break;
case CheckWatchdogTimer:
+ node->setCanExit(true);
break;
case Breakpoint:
case ProfileWillCall:
case ProfileDidCall:
- case ProfileType:
- case ProfileControlFlow:
case Phantom:
+ case Check:
case CountExecution:
case CheckTierUpInLoop:
case CheckTierUpAtReturn:
break;
- case Check: {
- // Simplify out checks that don't actually do checking.
- for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
- Edge edge = node->children.child(i);
- if (!edge)
- break;
- if (edge.isProved() || edge.willNotHaveCheck()) {
- m_state.setFoundConstants(true);
- break;
- }
- }
+ case ConditionalStoreBarrier: {
+ if (!needsTypeCheck(node->child2().node(), ~SpecCell))
+ m_state.setFoundConstants(true);
+ filter(node->child1(), SpecCell);
break;
}
@@ -2401,17 +1781,21 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
break;
}
+ case StoreBarrierWithNullCheck: {
+ break;
+ }
+
case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter:
case LoopHint:
- case ZombieHint:
+ // We pretend that it can exit because it may want to get all state.
+ node->setCanExit(true);
break;
+ case ZombieHint:
case Unreachable:
case LastNodeType:
case ArithIMul:
- case FiatInt52:
- DFG_CRASH(m_graph, node, "Unexpected node type");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
@@ -2428,8 +1812,9 @@ template<typename AbstractStateType>
bool AbstractInterpreter<AbstractStateType>::execute(unsigned indexInBlock)
{
Node* node = m_state.block()->at(indexInBlock);
+ if (!startExecuting(node))
+ return true;
- startExecuting();
executeEdges(node);
return executeEffects(indexInBlock, node);
}
@@ -2437,99 +1822,79 @@ bool AbstractInterpreter<AbstractStateType>::execute(unsigned indexInBlock)
template<typename AbstractStateType>
bool AbstractInterpreter<AbstractStateType>::execute(Node* node)
{
- startExecuting();
+ if (!startExecuting(node))
+ return true;
+
executeEdges(node);
return executeEffects(UINT_MAX, node);
}
template<typename AbstractStateType>
void AbstractInterpreter<AbstractStateType>::clobberWorld(
- const CodeOrigin&, unsigned clobberLimit)
+ const CodeOrigin& codeOrigin, unsigned clobberLimit)
{
+ clobberCapturedVars(codeOrigin);
clobberStructures(clobberLimit);
}
template<typename AbstractStateType>
-template<typename Functor>
-void AbstractInterpreter<AbstractStateType>::forAllValues(
- unsigned clobberLimit, Functor& functor)
+void AbstractInterpreter<AbstractStateType>::clobberCapturedVars(const CodeOrigin& codeOrigin)
+{
+ if (codeOrigin.inlineCallFrame) {
+ const BitVector& capturedVars = codeOrigin.inlineCallFrame->capturedVars;
+ for (size_t i = capturedVars.size(); i--;) {
+ if (!capturedVars.quickGet(i))
+ continue;
+ m_state.variables().local(i).makeHeapTop();
+ }
+ } else {
+ for (size_t i = m_codeBlock->m_numVars; i--;) {
+ if (m_codeBlock->isCaptured(virtualRegisterForLocal(i)))
+ m_state.variables().local(i).makeHeapTop();
+ }
+ }
+
+ for (size_t i = m_state.variables().numberOfArguments(); i--;) {
+ if (m_codeBlock->isCaptured(virtualRegisterForArgument(i)))
+ m_state.variables().argument(i).makeHeapTop();
+ }
+}
+
+template<typename AbstractStateType>
+void AbstractInterpreter<AbstractStateType>::clobberStructures(unsigned clobberLimit)
{
- SamplingRegion samplingRegion("DFG AI For All Values");
+ if (!m_state.haveStructures())
+ return;
if (clobberLimit >= m_state.block()->size())
clobberLimit = m_state.block()->size();
else
clobberLimit++;
ASSERT(clobberLimit <= m_state.block()->size());
for (size_t i = clobberLimit; i--;)
- functor(forNode(m_state.block()->at(i)));
+ forNode(m_state.block()->at(i)).clobberStructures();
if (m_graph.m_form == SSA) {
HashSet<Node*>::iterator iter = m_state.block()->ssa->liveAtHead.begin();
HashSet<Node*>::iterator end = m_state.block()->ssa->liveAtHead.end();
for (; iter != end; ++iter)
- functor(forNode(*iter));
+ forNode(*iter).clobberStructures();
}
for (size_t i = m_state.variables().numberOfArguments(); i--;)
- functor(m_state.variables().argument(i));
+ m_state.variables().argument(i).clobberStructures();
for (size_t i = m_state.variables().numberOfLocals(); i--;)
- functor(m_state.variables().local(i));
-}
-
-template<typename AbstractStateType>
-void AbstractInterpreter<AbstractStateType>::clobberStructures(unsigned clobberLimit)
-{
- SamplingRegion samplingRegion("DFG AI Clobber Structures");
- forAllValues(clobberLimit, AbstractValue::clobberStructuresFor);
- setDidClobber();
-}
-
-template<typename AbstractStateType>
-void AbstractInterpreter<AbstractStateType>::observeTransition(
- unsigned clobberLimit, Structure* from, Structure* to)
-{
- AbstractValue::TransitionObserver transitionObserver(from, to);
- forAllValues(clobberLimit, transitionObserver);
-
- ASSERT(!from->dfgShouldWatch()); // We don't need to claim to be in a clobbered state because 'from' was never watchable (during the time we were compiling), hence no constants ever introduced into the DFG IR that ever had a watchable structure would ever have the same structure as from.
-}
-
-template<typename AbstractStateType>
-void AbstractInterpreter<AbstractStateType>::observeTransitions(
- unsigned clobberLimit, const TransitionVector& vector)
-{
- AbstractValue::TransitionsObserver transitionsObserver(vector);
- forAllValues(clobberLimit, transitionsObserver);
-
- if (!ASSERT_DISABLED) {
- // We don't need to claim to be in a clobbered state because none of the Transition::previous structures are watchable.
- for (unsigned i = vector.size(); i--;)
- ASSERT(!vector[i].previous->dfgShouldWatch());
- }
-}
-
-template<typename AbstractStateType>
-void AbstractInterpreter<AbstractStateType>::setDidClobber()
-{
+ m_state.variables().local(i).clobberStructures();
+ m_state.setHaveStructures(true);
m_state.setDidClobber(true);
- m_state.setStructureClobberState(StructuresAreClobbered);
-}
-
-template<typename AbstractStateType>
-void AbstractInterpreter<AbstractStateType>::dump(PrintStream& out) const
-{
- const_cast<AbstractInterpreter<AbstractStateType>*>(this)->dump(out);
}
template<typename AbstractStateType>
void AbstractInterpreter<AbstractStateType>::dump(PrintStream& out)
{
CommaPrinter comma(" ");
- HashSet<Node*> seen;
if (m_graph.m_form == SSA) {
HashSet<Node*>::iterator iter = m_state.block()->ssa->liveAtHead.begin();
HashSet<Node*>::iterator end = m_state.block()->ssa->liveAtHead.end();
for (; iter != end; ++iter) {
Node* node = *iter;
- seen.add(node);
AbstractValue& value = forNode(node);
if (value.isClear())
continue;
@@ -2538,25 +1903,11 @@ void AbstractInterpreter<AbstractStateType>::dump(PrintStream& out)
}
for (size_t i = 0; i < m_state.block()->size(); ++i) {
Node* node = m_state.block()->at(i);
- seen.add(node);
AbstractValue& value = forNode(node);
if (value.isClear())
continue;
out.print(comma, node, ":", value);
}
- if (m_graph.m_form == SSA) {
- HashSet<Node*>::iterator iter = m_state.block()->ssa->liveAtTail.begin();
- HashSet<Node*>::iterator end = m_state.block()->ssa->liveAtTail.end();
- for (; iter != end; ++iter) {
- Node* node = *iter;
- if (seen.contains(node))
- continue;
- AbstractValue& value = forNode(node);
- if (value.isClear())
- continue;
- out.print(comma, node, ":", value);
- }
- }
}
template<typename AbstractStateType>
@@ -2591,7 +1942,7 @@ FiltrationResult AbstractInterpreter<AbstractStateType>::filter(
template<typename AbstractStateType>
FiltrationResult AbstractInterpreter<AbstractStateType>::filterByValue(
- AbstractValue& abstractValue, FrozenValue concreteValue)
+ AbstractValue& abstractValue, JSValue concreteValue)
{
if (abstractValue.filterByValue(concreteValue) == FiltrationOK)
return FiltrationOK;
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.cpp b/Source/JavaScriptCore/dfg/DFGAbstractValue.cpp
index 08466aacb..bd1ba4844 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractValue.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,164 +29,60 @@
#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
-#include "JSCInlines.h"
-#include "TrackedReferences.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
-void AbstractValue::observeTransitions(const TransitionVector& vector)
+void AbstractValue::setMostSpecific(Graph& graph, JSValue value)
{
- if (m_type & SpecCell) {
- m_structure.observeTransitions(vector);
- ArrayModes newModes = 0;
- for (unsigned i = vector.size(); i--;) {
- if (m_arrayModes & asArrayModes(vector[i].previous->indexingType()))
- newModes |= asArrayModes(vector[i].next->indexingType());
- }
- m_arrayModes |= newModes;
+ if (!!value && value.isCell()) {
+ Structure* structure = value.asCell()->structure();
+ m_currentKnownStructure = structure;
+ setFuturePossibleStructure(graph, structure);
+ m_arrayModes = asArrayModes(structure->indexingType());
+ } else {
+ m_currentKnownStructure.clear();
+ m_futurePossibleStructure.clear();
+ m_arrayModes = 0;
}
+
+ m_type = speculationFromValue(value);
+ m_value = value;
+
checkConsistency();
}
-void AbstractValue::set(Graph& graph, const FrozenValue& value, StructureClobberState clobberState)
+void AbstractValue::set(Graph& graph, JSValue value)
{
- if (!!value && value.value().isCell()) {
- Structure* structure = value.structure();
- if (graph.registerStructure(structure) == StructureRegisteredAndWatched) {
- m_structure = structure;
- if (clobberState == StructuresAreClobbered) {
- m_arrayModes = ALL_ARRAY_MODES;
- m_structure.clobber();
- } else
- m_arrayModes = asArrayModes(structure->indexingType());
- } else {
- m_structure.makeTop();
- m_arrayModes = ALL_ARRAY_MODES;
- }
+ if (!!value && value.isCell()) {
+ m_currentKnownStructure.makeTop();
+ Structure* structure = value.asCell()->structure();
+ setFuturePossibleStructure(graph, structure);
+ m_arrayModes = asArrayModes(structure->indexingType());
+ clobberArrayModes();
} else {
- m_structure.clear();
+ m_currentKnownStructure.clear();
+ m_futurePossibleStructure.clear();
m_arrayModes = 0;
}
-
- m_type = speculationFromValue(value.value());
- m_value = value.value();
-
+
+ m_type = speculationFromValue(value);
+ if (m_type == SpecInt52AsDouble)
+ m_type = SpecInt52;
+ m_value = value;
+
checkConsistency();
- assertIsRegistered(graph);
}
void AbstractValue::set(Graph& graph, Structure* structure)
{
- m_structure = structure;
+ m_currentKnownStructure = structure;
+ setFuturePossibleStructure(graph, structure);
m_arrayModes = asArrayModes(structure->indexingType());
m_type = speculationFromStructure(structure);
m_value = JSValue();
checkConsistency();
- assertIsRegistered(graph);
-}
-
-void AbstractValue::set(Graph& graph, const StructureSet& set)
-{
- m_structure = set;
- m_arrayModes = set.arrayModesFromStructures();
- m_type = set.speculationFromStructures();
- m_value = JSValue();
-
- checkConsistency();
- assertIsRegistered(graph);
-}
-
-void AbstractValue::setType(Graph& graph, SpeculatedType type)
-{
- SpeculatedType cellType = type & SpecCell;
- if (cellType) {
- if (!(cellType & ~SpecString))
- m_structure = graph.m_vm.stringStructure.get();
- else if (isSymbolSpeculation(cellType))
- m_structure = graph.m_vm.symbolStructure.get();
- else
- m_structure.makeTop();
- m_arrayModes = ALL_ARRAY_MODES;
- } else {
- m_structure.clear();
- m_arrayModes = 0;
- }
- m_type = type;
- m_value = JSValue();
- checkConsistency();
-}
-
-void AbstractValue::fixTypeForRepresentation(Graph& graph, NodeFlags representation, Node* node)
-{
- if (representation == NodeResultDouble) {
- if (m_value) {
- ASSERT(m_value.isNumber());
- if (m_value.isInt32())
- m_value = jsDoubleNumber(m_value.asNumber());
- }
- if (m_type & SpecMachineInt) {
- m_type &= ~SpecMachineInt;
- m_type |= SpecInt52AsDouble;
- }
- if (m_type & ~SpecFullDouble)
- DFG_CRASH(graph, node, toCString("Abstract value ", *this, " for double node has type outside SpecFullDouble.\n").data());
- } else if (representation == NodeResultInt52) {
- if (m_type & SpecInt52AsDouble) {
- m_type &= ~SpecInt52AsDouble;
- m_type |= SpecInt52;
- }
- if (m_type & ~SpecMachineInt)
- DFG_CRASH(graph, node, toCString("Abstract value ", *this, " for int52 node has type outside SpecMachineInt.\n").data());
- } else {
- if (m_type & SpecInt52) {
- m_type &= ~SpecInt52;
- m_type |= SpecInt52AsDouble;
- }
- if (m_type & ~SpecBytecodeTop)
- DFG_CRASH(graph, node, toCString("Abstract value ", *this, " for value node has type outside SpecBytecodeTop.\n").data());
- }
-
- checkConsistency();
-}
-
-void AbstractValue::fixTypeForRepresentation(Graph& graph, Node* node)
-{
- fixTypeForRepresentation(graph, node->result(), node);
-}
-
-bool AbstractValue::mergeOSREntryValue(Graph& graph, JSValue value)
-{
- AbstractValue oldMe = *this;
-
- if (isClear()) {
- FrozenValue* frozenValue = graph.freeze(value);
- if (frozenValue->pointsToHeap()) {
- m_structure = frozenValue->structure();
- m_arrayModes = asArrayModes(frozenValue->structure()->indexingType());
- } else {
- m_structure.clear();
- m_arrayModes = 0;
- }
-
- m_type = speculationFromValue(value);
- m_value = value;
- } else {
- mergeSpeculation(m_type, speculationFromValue(value));
- if (!!value && value.isCell()) {
- Structure* structure = value.asCell()->structure();
- graph.registerStructure(structure);
- mergeArrayModes(m_arrayModes, asArrayModes(structure->indexingType()));
- m_structure.merge(StructureSet(structure));
- }
- if (m_value != value)
- m_value = JSValue();
- }
-
- checkConsistency();
- assertIsRegistered(graph);
-
- return oldMe != *this;
}
FiltrationResult AbstractValue::filter(Graph& graph, const StructureSet& other)
@@ -200,29 +96,21 @@ FiltrationResult AbstractValue::filter(Graph& graph, const StructureSet& other)
m_type &= other.speculationFromStructures();
m_arrayModes &= other.arrayModesFromStructures();
- m_structure.filter(other);
+ m_currentKnownStructure.filter(other);
// It's possible that prior to the above two statements we had (Foo, TOP), where
// Foo is a SpeculatedType that is disjoint with the passed StructureSet. In that
// case, we will now have (None, [someStructure]). In general, we need to make
// sure that new information gleaned from the SpeculatedType needs to be fed back
// into the information gleaned from the StructureSet.
- m_structure.filter(m_type);
+ m_currentKnownStructure.filter(m_type);
+ if (m_currentKnownStructure.hasSingleton())
+ setFuturePossibleStructure(graph, m_currentKnownStructure.singleton());
+
filterArrayModesByType();
filterValueByType();
- return normalizeClarity(graph);
-}
-
-FiltrationResult AbstractValue::changeStructure(Graph& graph, const StructureSet& other)
-{
- m_type &= other.speculationFromStructures();
- m_arrayModes = other.arrayModesFromStructures();
- m_structure = other;
-
- filterValueByType();
-
- return normalizeClarity(graph);
+ return normalizeClarity();
}
FiltrationResult AbstractValue::filterArrayModes(ArrayModes arrayModes)
@@ -242,77 +130,34 @@ FiltrationResult AbstractValue::filter(SpeculatedType type)
if ((m_type & type) == m_type)
return FiltrationOK;
- // Fast path for the case that we don't even have a cell.
- if (!(m_type & SpecCell)) {
- m_type &= type;
- FiltrationResult result;
- if (m_type == SpecNone) {
- clear();
- result = Contradiction;
- } else
- result = FiltrationOK;
- checkConsistency();
- return result;
- }
-
m_type &= type;
// It's possible that prior to this filter() call we had, say, (Final, TOP), and
// the passed type is Array. At this point we'll have (None, TOP). The best way
// to ensure that the structure filtering does the right thing is to filter on
// the new type (None) rather than the one passed (Array).
- m_structure.filter(type);
+ m_currentKnownStructure.filter(m_type);
+ m_futurePossibleStructure.filter(m_type);
filterArrayModesByType();
filterValueByType();
return normalizeClarity();
}
-FiltrationResult AbstractValue::filterByValue(const FrozenValue& value)
+FiltrationResult AbstractValue::filterByValue(JSValue value)
{
- FiltrationResult result = filter(speculationFromValue(value.value()));
+ FiltrationResult result = filter(speculationFromValue(value));
if (m_type)
- m_value = value.value();
+ m_value = value;
return result;
}
-bool AbstractValue::contains(Structure* structure) const
+void AbstractValue::setFuturePossibleStructure(Graph& graph, Structure* structure)
{
- return couldBeType(speculationFromStructure(structure))
- && (m_arrayModes & arrayModeFromStructure(structure))
- && m_structure.contains(structure);
-}
-
-FiltrationResult AbstractValue::filter(const AbstractValue& other)
-{
- m_type &= other.m_type;
- m_structure.filter(other.m_structure);
- m_arrayModes &= other.m_arrayModes;
-
- m_structure.filter(m_type);
- filterArrayModesByType();
- filterValueByType();
-
- if (normalizeClarity() == Contradiction)
- return Contradiction;
-
- if (m_value == other.m_value)
- return FiltrationOK;
-
- // Neither of us are BOTTOM, so an empty value means TOP.
- if (!m_value) {
- // We previously didn't prove a value but now we have done so.
- m_value = other.m_value;
- return FiltrationOK;
- }
-
- if (!other.m_value) {
- // We had proved a value but the other guy hadn't, so keep our proof.
- return FiltrationOK;
- }
-
- // We both proved there to be a specific value but they are different.
- clear();
- return Contradiction;
+ ASSERT(structure);
+ if (graph.watchpoints().isStillValid(structure->transitionWatchpointSet()))
+ m_futurePossibleStructure = structure;
+ else
+ m_futurePossibleStructure.makeTop();
}
void AbstractValue::filterValueByType()
@@ -360,7 +205,8 @@ bool AbstractValue::shouldBeClear() const
return true;
if (!(m_type & ~SpecCell)
- && (!m_arrayModes || m_structure.isClear()))
+ && (!m_arrayModes
+ || m_currentKnownStructure.isClear()))
return true;
return false;
@@ -384,18 +230,12 @@ FiltrationResult AbstractValue::normalizeClarity()
return result;
}
-FiltrationResult AbstractValue::normalizeClarity(Graph& graph)
-{
- FiltrationResult result = normalizeClarity();
- assertIsRegistered(graph);
- return result;
-}
-
#if !ASSERT_DISABLED
void AbstractValue::checkConsistency() const
{
if (!(m_type & SpecCell)) {
- ASSERT(m_structure.isClear());
+ ASSERT(m_currentKnownStructure.isClear());
+ ASSERT(m_futurePossibleStructure.isClear());
ASSERT(!m_arrayModes);
}
@@ -404,8 +244,6 @@ void AbstractValue::checkConsistency() const
if (!!m_value) {
SpeculatedType type = m_type;
- // This relaxes the assertion below a bit, since we don't know the representation of the
- // node.
if (type & SpecInt52)
type |= SpecInt52AsDouble;
ASSERT(mergeSpeculations(type, speculationFromValue(m_value)) == type);
@@ -416,11 +254,6 @@ void AbstractValue::checkConsistency() const
// we don't want to get pedantic about this as it would only increase the computational
// complexity of the code.
}
-
-void AbstractValue::assertIsRegistered(Graph& graph) const
-{
- m_structure.assertIsRegistered(graph);
-}
#endif
void AbstractValue::dump(PrintStream& out) const
@@ -434,19 +267,14 @@ void AbstractValue::dumpInContext(PrintStream& out, DumpContext* context) const
if (m_type & SpecCell) {
out.print(
", ", ArrayModesDump(m_arrayModes), ", ",
- inContext(m_structure, context));
+ inContext(m_currentKnownStructure, context), ", ",
+ inContext(m_futurePossibleStructure, context));
}
if (!!m_value)
out.print(", ", inContext(m_value, context));
out.print(")");
}
-void AbstractValue::validateReferences(const TrackedReferences& trackedReferences)
-{
- trackedReferences.check(m_value);
- m_structure.validateReferences(trackedReferences);
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.h b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
index 7318d0d44..db313d242 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractValue.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,27 +26,21 @@
#ifndef DFGAbstractValue_h
#define DFGAbstractValue_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "ArrayProfile.h"
#include "DFGFiltrationResult.h"
-#include "DFGFrozenValue.h"
-#include "DFGNodeFlags.h"
#include "DFGStructureAbstractValue.h"
-#include "DFGStructureClobberState.h"
#include "JSCell.h"
#include "SpeculatedType.h"
#include "DumpContext.h"
#include "StructureSet.h"
-namespace JSC {
-
-class TrackedReferences;
-
-namespace DFG {
+namespace JSC { namespace DFG {
class Graph;
-struct Node;
struct AbstractValue {
AbstractValue()
@@ -59,7 +53,8 @@ struct AbstractValue {
{
m_type = SpecNone;
m_arrayModes = 0;
- m_structure.clear();
+ m_currentKnownStructure.clear();
+ m_futurePossibleStructure.clear();
m_value = JSValue();
checkConsistency();
}
@@ -77,82 +72,18 @@ struct AbstractValue {
makeTop(SpecBytecodeTop);
}
- void makeFullTop()
- {
- makeTop(SpecFullTop);
- }
-
void clobberStructures()
{
if (m_type & SpecCell) {
- m_structure.clobber();
+ m_currentKnownStructure.makeTop();
clobberArrayModes();
} else {
- ASSERT(m_structure.isClear());
+ ASSERT(m_currentKnownStructure.isClear());
ASSERT(!m_arrayModes);
}
checkConsistency();
}
-
- static void clobberStructuresFor(AbstractValue& value)
- {
- value.clobberStructures();
- }
-
- void observeInvalidationPoint()
- {
- m_structure.observeInvalidationPoint();
- checkConsistency();
- }
-
- static void observeInvalidationPointFor(AbstractValue& value)
- {
- value.observeInvalidationPoint();
- }
-
- void observeTransition(Structure* from, Structure* to)
- {
- if (m_type & SpecCell) {
- m_structure.observeTransition(from, to);
- observeIndexingTypeTransition(from->indexingType(), to->indexingType());
- }
- checkConsistency();
- }
-
- void observeTransitions(const TransitionVector& vector);
-
- class TransitionObserver {
- public:
- TransitionObserver(Structure* from, Structure* to)
- : m_from(from)
- , m_to(to)
- {
- }
- void operator()(AbstractValue& value)
- {
- value.observeTransition(m_from, m_to);
- }
- private:
- Structure* m_from;
- Structure* m_to;
- };
-
- class TransitionsObserver {
- public:
- TransitionsObserver(const TransitionVector& vector)
- : m_vector(vector)
- {
- }
-
- void operator()(AbstractValue& value)
- {
- value.observeTransitions(m_vector);
- }
- private:
- const TransitionVector& m_vector;
- };
-
void clobberValue()
{
m_value = JSValue();
@@ -160,10 +91,7 @@ struct AbstractValue {
bool isHeapTop() const
{
- return (m_type | SpecHeapTop) == m_type
- && m_structure.isTop()
- && m_arrayModes == ALL_ARRAY_MODES
- && !m_value;
+ return (m_type | SpecHeapTop) == m_type && m_currentKnownStructure.isTop() && m_futurePossibleStructure.isTop();
}
bool valueIsTop() const
@@ -183,46 +111,32 @@ struct AbstractValue {
return result;
}
- static AbstractValue bytecodeTop()
- {
- AbstractValue result;
- result.makeBytecodeTop();
- return result;
- }
-
- static AbstractValue fullTop()
- {
- AbstractValue result;
- result.makeFullTop();
- return result;
- }
-
- void set(Graph&, const FrozenValue&, StructureClobberState);
+ void setMostSpecific(Graph&, JSValue);
+ void set(Graph&, JSValue);
void set(Graph&, Structure*);
- void set(Graph&, const StructureSet&);
-
- // Set this value to represent the given set of types as precisely as possible.
- void setType(Graph&, SpeculatedType);
- // As above, but only valid for non-cell types.
void setType(SpeculatedType type)
{
- RELEASE_ASSERT(!(type & SpecCell));
- m_structure.clear();
- m_arrayModes = 0;
+ if (type & SpecCell) {
+ m_currentKnownStructure.makeTop();
+ m_futurePossibleStructure.makeTop();
+ m_arrayModes = ALL_ARRAY_MODES;
+ } else {
+ m_currentKnownStructure.clear();
+ m_futurePossibleStructure.clear();
+ m_arrayModes = 0;
+ }
m_type = type;
m_value = JSValue();
checkConsistency();
}
- void fixTypeForRepresentation(Graph&, NodeFlags representation, Node* = nullptr);
- void fixTypeForRepresentation(Graph&, Node*);
-
bool operator==(const AbstractValue& other) const
{
return m_type == other.m_type
&& m_arrayModes == other.m_arrayModes
- && m_structure == other.m_structure
+ && m_currentKnownStructure == other.m_currentKnownStructure
+ && m_futurePossibleStructure == other.m_futurePossibleStructure
&& m_value == other.m_value;
}
bool operator!=(const AbstractValue& other) const
@@ -245,7 +159,8 @@ struct AbstractValue {
} else {
result |= mergeSpeculation(m_type, other.m_type);
result |= mergeArrayModes(m_arrayModes, other.m_arrayModes);
- result |= m_structure.merge(other.m_structure);
+ result |= m_currentKnownStructure.addAll(other.m_currentKnownStructure);
+ result |= m_futurePossibleStructure.addAll(other.m_futurePossibleStructure);
if (m_value != other.m_value) {
result |= !!m_value;
m_value = JSValue();
@@ -256,14 +171,13 @@ struct AbstractValue {
return result;
}
- bool mergeOSREntryValue(Graph&, JSValue);
-
void merge(SpeculatedType type)
{
mergeSpeculation(m_type, type);
if (type & SpecCell) {
- m_structure.makeTop();
+ m_currentKnownStructure.makeTop();
+ m_futurePossibleStructure.makeTop();
m_arrayModes = ALL_ARRAY_MODES;
}
m_value = JSValue();
@@ -271,25 +185,23 @@ struct AbstractValue {
checkConsistency();
}
- bool couldBeType(SpeculatedType desiredType) const
+ bool couldBeType(SpeculatedType desiredType)
{
return !!(m_type & desiredType);
}
- bool isType(SpeculatedType desiredType) const
+ bool isType(SpeculatedType desiredType)
{
return !(m_type & ~desiredType);
}
FiltrationResult filter(Graph&, const StructureSet&);
- FiltrationResult filterArrayModes(ArrayModes);
- FiltrationResult filter(SpeculatedType);
- FiltrationResult filterByValue(const FrozenValue& value);
- FiltrationResult filter(const AbstractValue&);
- FiltrationResult changeStructure(Graph&, const StructureSet&);
+ FiltrationResult filterArrayModes(ArrayModes arrayModes);
- bool contains(Structure*) const;
+ FiltrationResult filter(SpeculatedType type);
+
+ FiltrationResult filterByValue(JSValue value);
bool validate(JSValue value) const
{
@@ -310,32 +222,75 @@ struct AbstractValue {
if (!!value && value.isCell()) {
ASSERT(m_type & SpecCell);
Structure* structure = value.asCell()->structure();
- return m_structure.contains(structure)
+ return m_currentKnownStructure.contains(structure)
+ && m_futurePossibleStructure.contains(structure)
&& (m_arrayModes & asArrayModes(structure->indexingType()));
}
return true;
}
+ Structure* bestProvenStructure() const
+ {
+ if (m_currentKnownStructure.hasSingleton())
+ return m_currentKnownStructure.singleton();
+ if (m_futurePossibleStructure.hasSingleton())
+ return m_futurePossibleStructure.singleton();
+ return 0;
+ }
+
bool hasClobberableState() const
{
- return m_structure.isNeitherClearNorTop()
+ return m_currentKnownStructure.isNeitherClearNorTop()
|| !arrayModesAreClearOrTop(m_arrayModes);
}
#if ASSERT_DISABLED
void checkConsistency() const { }
- void assertIsRegistered(Graph&) const { }
#else
void checkConsistency() const;
- void assertIsRegistered(Graph&) const;
#endif
void dumpInContext(PrintStream&, DumpContext*) const;
void dump(PrintStream&) const;
- void validateReferences(const TrackedReferences&);
-
+ // A great way to think about the difference between m_currentKnownStructure and
+ // m_futurePossibleStructure is to consider these four examples:
+ //
+ // 1) x = foo();
+ //
+ // In this case x's m_currentKnownStructure and m_futurePossibleStructure will
+ // both be TOP, since we don't know anything about x for sure, yet.
+ //
+ // 2) x = foo();
+ // y = x.f;
+ //
+ // Where x will later have a new property added to it, 'g'. Because of the
+ // known but not-yet-executed property addition, x's current structure will
+ // not be watchpointable; hence we have no way of statically bounding the set
+ // of possible structures that x may have if a clobbering event happens. So,
+ // x's m_currentKnownStructure will be whatever structure we check to get
+ // property 'f', and m_futurePossibleStructure will be TOP.
+ //
+ // 3) x = foo();
+ // y = x.f;
+ //
+ // Where x has a terminal structure that is still watchpointable. In this case,
+ // x's m_currentKnownStructure and m_futurePossibleStructure will both be
+ // whatever structure we checked for when getting 'f'.
+ //
+ // 4) x = foo();
+ // y = x.f;
+ // bar();
+ //
+ // Where x has a terminal structure that is still watchpointable. In this
+ // case, m_currentKnownStructure will be TOP because bar() may potentially
+ // change x's structure and we have no way of proving otherwise, but
+ // x's m_futurePossibleStructure will be whatever structure we had checked
+ // when getting property 'f'.
+
+ // NB. All fields in this struct must have trivial destructors.
+
// This is a proven constraint on the structures that this value can have right
// now. The structure of the current value must belong to this set. The set may
// be TOP, indicating that it is the set of all possible structures, in which
@@ -343,25 +298,44 @@ struct AbstractValue {
// in which case this value cannot be a cell. This is all subject to change
// anytime a new value is assigned to this one, anytime there is a control flow
// merge, or most crucially, anytime a side-effect or structure check happens.
- // In case of a side-effect, we must assume that any value with a structure that
- // isn't being watched may have had its structure changed, hence contravening
- // our proof. In such a case we make the proof valid again by switching this to
- // TOP (i.e. claiming that we have proved that this value may have any
- // structure).
- StructureAbstractValue m_structure;
+ // In case of a side-effect, we typically must assume that any value may have
+ // had its structure changed, hence contravening our proof. We make the proof
+ // valid again by switching this to TOP (i.e. claiming that we have proved that
+ // this value may have any structure). Of note is that the proof represented by
+ // this field is not subject to structure transition watchpoints - even if one
+ // fires, we can be sure that this proof is still valid.
+ StructureAbstractValue m_currentKnownStructure;
+
+ // This is a proven constraint on the structures that this value can have now
+ // or any time in the future subject to the structure transition watchpoints of
+ // all members of this set not having fired. This set is impervious to side-
+ // effects; even if one happens the side-effect can only cause the value to
+ // change to at worst another structure that is also a member of this set. But,
+ // the theorem being proved by this field is predicated upon there not being
+ // any new structure transitions introduced into any members of this set. In
+ // cases where there is no way for us to guard this happening, the set must be
+ // TOP. But in cases where we can guard new structure transitions (all members
+ // of the set have still-valid structure transition watchpoints) then this set
+ // will be finite. Anytime that we make use of the finite nature of this set,
+ // we must first issue a structure transition watchpoint, which will effectively
+ // result in m_currentKnownStructure being filtered according to
+ // m_futurePossibleStructure.
+ StructureAbstractValue m_futurePossibleStructure;
// This is a proven constraint on the possible types that this value can have
// now or any time in the future, unless it is reassigned. This field is
- // impervious to side-effects. The relationship between this field, and the
- // structure fields above, is as follows. The fields above constraint the
- // structures that a cell may have, but they say nothing about whether or not
- // the value is known to be a cell. More formally, the m_structure is itself an
- // abstract value that consists of the union of the set of all non-cell values
- // and the set of cell values that have the given structure. This abstract
- // value is then the intersection of the m_structure and the set of values
- // whose type is m_type. So, for example if m_type is SpecFinal|SpecInt32 and
- // m_structure is [0x12345] then this abstract value corresponds to the set of
- // all integers unified with the set of all objects with structure 0x12345.
+ // impervious to side-effects unless the side-effect can reassign the value
+ // (for example if we're talking about a captured variable). The relationship
+ // between this field, and the structure fields above, is as follows. The
+ // fields above constraint the structures that a cell may have, but they say
+ // nothing about whether or not the value is known to be a cell. More formally,
+ // the m_currentKnownStructure is itself an abstract value that consists of the
+ // union of the set of all non-cell values and the set of cell values that have
+ // the given structure. This abstract value is then the intersection of the
+ // m_currentKnownStructure and the set of values whose type is m_type. So, for
+ // example if m_type is SpecFinal|SpecInt32 and m_currentKnownStructure is
+ // [0x12345] then this abstract value corresponds to the set of all integers
+ // unified with the set of all objects with structure 0x12345.
SpeculatedType m_type;
// This is a proven constraint on the possible indexing types that this value
@@ -376,11 +350,7 @@ struct AbstractValue {
// implies nothing about the structure. Oddly, JSValue() (i.e. the empty value)
// means either BOTTOM or TOP depending on the state of m_type: if m_type is
// BOTTOM then JSValue() means BOTTOM; if m_type is not BOTTOM then JSValue()
- // means TOP. Also note that this value isn't necessarily known to the GC
- // (strongly or even weakly - it may be an "fragile" value, see
- // DFGValueStrength.h). If you perform any optimization based on a cell m_value
- // that requires that the value be kept alive, you must call freeze() on that
- // value, which will turn it into a weak value.
+ // means TOP.
JSValue m_value;
private:
@@ -391,12 +361,6 @@ private:
m_arrayModes = ALL_ARRAY_MODES;
}
- void observeIndexingTypeTransition(IndexingType from, IndexingType to)
- {
- if (m_arrayModes & asArrayModes(from))
- m_arrayModes |= asArrayModes(to);
- }
-
bool validateType(JSValue value) const
{
if (isHeapTop())
@@ -424,17 +388,19 @@ private:
{
m_type |= top;
m_arrayModes = ALL_ARRAY_MODES;
- m_structure.makeTop();
+ m_currentKnownStructure.makeTop();
+ m_futurePossibleStructure.makeTop();
m_value = JSValue();
checkConsistency();
}
+ void setFuturePossibleStructure(Graph&, Structure* structure);
+
void filterValueByType();
void filterArrayModesByType();
bool shouldBeClear() const;
FiltrationResult normalizeClarity();
- FiltrationResult normalizeClarity(Graph&);
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp b/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp
deleted file mode 100644
index 2038a193e..000000000
--- a/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGAdaptiveInferredPropertyValueWatchpoint.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-AdaptiveInferredPropertyValueWatchpoint::AdaptiveInferredPropertyValueWatchpoint(
- const ObjectPropertyCondition& key,
- CodeBlock* codeBlock)
- : m_key(key)
- , m_codeBlock(codeBlock)
-{
- RELEASE_ASSERT(key.kind() == PropertyCondition::Equivalence);
-}
-
-void AdaptiveInferredPropertyValueWatchpoint::install()
-{
- RELEASE_ASSERT(m_key.isWatchable());
-
- m_key.object()->structure()->addTransitionWatchpoint(&m_structureWatchpoint);
-
- PropertyOffset offset = m_key.object()->structure()->getConcurrently(m_key.uid());
- WatchpointSet* set = m_key.object()->structure()->propertyReplacementWatchpointSet(offset);
- set->add(&m_propertyWatchpoint);
-}
-
-void AdaptiveInferredPropertyValueWatchpoint::fire(const FireDetail& detail)
-{
- // One of the watchpoints fired, but the other one didn't. Make sure that neither of them are
- // in any set anymore. This simplifies things by allowing us to reinstall the watchpoints
- // wherever from scratch.
- if (m_structureWatchpoint.isOnList())
- m_structureWatchpoint.remove();
- if (m_propertyWatchpoint.isOnList())
- m_propertyWatchpoint.remove();
-
- if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
- install();
- return;
- }
-
- if (DFG::shouldShowDisassembly()) {
- dataLog(
- "Firing watchpoint ", RawPointer(this), " (", m_key, ") on ", *m_codeBlock, "\n");
- }
-
- StringPrintStream out;
- out.print("Adaptation of ", m_key, " failed: ", detail);
-
- StringFireDetail stringDetail(out.toCString().data());
-
- m_codeBlock->jettison(
- Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &stringDetail);
-}
-
-void AdaptiveInferredPropertyValueWatchpoint::StructureWatchpoint::fireInternal(
- const FireDetail& detail)
-{
- ptrdiff_t myOffset = OBJECT_OFFSETOF(
- AdaptiveInferredPropertyValueWatchpoint, m_structureWatchpoint);
-
- AdaptiveInferredPropertyValueWatchpoint* parent =
- bitwise_cast<AdaptiveInferredPropertyValueWatchpoint*>(
- bitwise_cast<char*>(this) - myOffset);
-
- parent->fire(detail);
-}
-
-void AdaptiveInferredPropertyValueWatchpoint::PropertyWatchpoint::fireInternal(
- const FireDetail& detail)
-{
- ptrdiff_t myOffset = OBJECT_OFFSETOF(
- AdaptiveInferredPropertyValueWatchpoint, m_propertyWatchpoint);
-
- AdaptiveInferredPropertyValueWatchpoint* parent =
- bitwise_cast<AdaptiveInferredPropertyValueWatchpoint*>(
- bitwise_cast<char*>(this) - myOffset);
-
- parent->fire(detail);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.h b/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.h
deleted file mode 100644
index 28ab3a75f..000000000
--- a/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGAdaptiveInferredPropertyValueWatchpoint_h
-#define DFGAdaptiveInferredPropertyValueWatchpoint_h
-
-#if ENABLE(DFG_JIT)
-
-#include "ObjectPropertyCondition.h"
-#include "Watchpoint.h"
-#include <wtf/FastMalloc.h>
-#include <wtf/Noncopyable.h>
-
-namespace JSC { namespace DFG {
-
-class AdaptiveInferredPropertyValueWatchpoint {
- WTF_MAKE_NONCOPYABLE(AdaptiveInferredPropertyValueWatchpoint);
- WTF_MAKE_FAST_ALLOCATED;
-
-public:
- AdaptiveInferredPropertyValueWatchpoint(const ObjectPropertyCondition&, CodeBlock*);
-
- const ObjectPropertyCondition& key() const { return m_key; }
-
- void install();
-
-private:
- class StructureWatchpoint : public Watchpoint {
- public:
- StructureWatchpoint() { }
- protected:
- virtual void fireInternal(const FireDetail&) override;
- };
- class PropertyWatchpoint : public Watchpoint {
- public:
- PropertyWatchpoint() { }
- protected:
- virtual void fireInternal(const FireDetail&) override;
- };
-
- void fire(const FireDetail&);
-
- ObjectPropertyCondition m_key;
- CodeBlock* m_codeBlock;
- StructureWatchpoint m_structureWatchpoint;
- PropertyWatchpoint m_propertyWatchpoint;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGAdaptiveInferredPropertyValueWatchpoint_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.cpp b/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.cpp
deleted file mode 100644
index 6bc93b52a..000000000
--- a/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGAdaptiveStructureWatchpoint.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-AdaptiveStructureWatchpoint::AdaptiveStructureWatchpoint(
- const ObjectPropertyCondition& key,
- CodeBlock* codeBlock)
- : m_key(key)
- , m_codeBlock(codeBlock)
-{
- RELEASE_ASSERT(key.watchingRequiresStructureTransitionWatchpoint());
- RELEASE_ASSERT(!key.watchingRequiresReplacementWatchpoint());
-}
-
-void AdaptiveStructureWatchpoint::install()
-{
- RELEASE_ASSERT(m_key.isWatchable());
-
- m_key.object()->structure()->addTransitionWatchpoint(this);
-}
-
-void AdaptiveStructureWatchpoint::fireInternal(const FireDetail& detail)
-{
- if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
- install();
- return;
- }
-
- if (DFG::shouldShowDisassembly()) {
- dataLog(
- "Firing watchpoint ", RawPointer(this), " (", m_key, ") on ", *m_codeBlock, "\n");
- }
-
- StringPrintStream out;
- out.print("Adaptation of ", m_key, " failed: ", detail);
-
- StringFireDetail stringDetail(out.toCString().data());
-
- m_codeBlock->jettison(
- Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &stringDetail);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h b/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h
deleted file mode 100644
index f153e23c5..000000000
--- a/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGAdaptiveStructureWatchpoint_h
-#define DFGAdaptiveStructureWatchpoint_h
-
-#if ENABLE(DFG_JIT)
-
-#include "ObjectPropertyCondition.h"
-#include "Watchpoint.h"
-
-namespace JSC { namespace DFG {
-
-class AdaptiveStructureWatchpoint : public Watchpoint {
-public:
- AdaptiveStructureWatchpoint(const ObjectPropertyCondition&, CodeBlock*);
-
- const ObjectPropertyCondition& key() const { return m_key; }
-
- void install();
-
-protected:
- virtual void fireInternal(const FireDetail&) override;
-
-private:
- ObjectPropertyCondition m_key;
- CodeBlock* m_codeBlock;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGAdaptiveStructureWatchpoint_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGAdjacencyList.h b/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
index 63ebef5fa..dc3cccb3f 100644
--- a/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
+++ b/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGAdjacencyList_h
#define DFGAdjacencyList_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -52,7 +54,7 @@ public:
}
}
- AdjacencyList(Kind kind, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
+ AdjacencyList(Kind kind, Edge child1, Edge child2, Edge child3)
{
ASSERT_UNUSED(kind, kind == Fixed);
initialize(child1, child2, child3);
@@ -65,8 +67,6 @@ public:
setNumChildren(numChildren);
}
- bool isEmpty() const { return !child1(); }
-
const Edge& child(unsigned i) const
{
ASSERT(i < Size);
@@ -132,7 +132,7 @@ public:
setChild(i, child(i + 1));
setChild(Size - 1, Edge());
}
-
+
unsigned firstChild() const
{
return m_words[0].m_encodedWord;
@@ -151,56 +151,6 @@ public:
m_words[1].m_encodedWord = numChildren;
}
- AdjacencyList sanitized() const
- {
- return AdjacencyList(Fixed, child1().sanitized(), child2().sanitized(), child3().sanitized());
- }
-
- AdjacencyList justChecks() const
- {
- AdjacencyList result(Fixed);
- unsigned sourceIndex = 0;
- unsigned targetIndex = 0;
- while (sourceIndex < AdjacencyList::Size) {
- Edge edge = child(sourceIndex++);
- if (!edge)
- break;
- if (edge.willHaveCheck())
- result.child(targetIndex++) = edge;
- }
- return result;
- }
-
- unsigned hash() const
- {
- unsigned result = 0;
- if (!child1())
- return result;
-
- result += child1().hash();
-
- if (!child2())
- return result;
-
- result *= 3;
- result += child2().hash();
-
- if (!child3())
- return result;
-
- result *= 3;
- result += child3().hash();
-
- return result;
- }
-
- bool operator==(const AdjacencyList& other) const
- {
- return child1() == other.child1()
- && child2() == other.child2()
- && child3() == other.child3();
- }
-
private:
Edge m_words[Size];
};
diff --git a/Source/JavaScriptCore/dfg/DFGAllocator.h b/Source/JavaScriptCore/dfg/DFGAllocator.h
index f380df001..80e1034cf 100644
--- a/Source/JavaScriptCore/dfg/DFGAllocator.h
+++ b/Source/JavaScriptCore/dfg/DFGAllocator.h
@@ -26,9 +26,12 @@
#ifndef DFGAllocator_h
#define DFGAllocator_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
+#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
namespace JSC { namespace DFG {
@@ -49,7 +52,7 @@ public:
void* allocate(); // Use placement new to allocate, and avoid using this method.
void free(T*); // Call this method to delete; never use 'delete' directly.
- void freeAll(); // Only call this if you've either freed everything or if T has a trivial destructor.
+ void freeAll(); // Only call this if T has a trivial destructor.
void reset(); // Like freeAll(), but also returns all memory to the OS.
unsigned indexOf(const T*);
@@ -69,7 +72,7 @@ private:
bool isInThisRegion(const T* pointer) { return static_cast<unsigned>(pointer - data()) < numberOfThingsPerRegion(); }
static Region* regionFor(const T* pointer) { return bitwise_cast<Region*>(bitwise_cast<uintptr_t>(pointer) & ~(size() - 1)); }
- void* m_allocation;
+ PageAllocationAligned m_allocation;
Allocator* m_allocator;
Region* m_next;
};
@@ -152,14 +155,11 @@ void Allocator<T>::reset()
template<typename T>
unsigned Allocator<T>::indexOf(const T* object)
{
- unsigned numRegions = 0;
- for (Region* region = m_regionHead; region; region = region->m_next)
- numRegions++;
- unsigned regionIndex = 0;
+ unsigned baseIndex = 0;
for (Region* region = m_regionHead; region; region = region->m_next) {
if (region->isInThisRegion(object))
- return (numRegions - 1 - regionIndex) * Region::numberOfThingsPerRegion() + (object - region->data());
- regionIndex++;
+ return baseIndex + (object - region->data());
+ baseIndex += Region::numberOfThingsPerRegion();
}
CRASH();
return 0;
@@ -200,9 +200,11 @@ void* Allocator<T>::allocateSlow()
if (logCompilationChanges())
dataLog("Allocating another allocator region.\n");
-
- void* allocation = fastAlignedMalloc(Region::size(), Region::size());
- Region* region = static_cast<Region*>(allocation);
+
+ PageAllocationAligned allocation = PageAllocationAligned::allocate(Region::size(), Region::size(), OSAllocator::JSGCHeapPages);
+ if (!static_cast<bool>(allocation))
+ CRASH();
+ Region* region = static_cast<Region*>(allocation.base());
region->m_allocation = allocation;
region->m_allocator = this;
startBumpingIn(region);
@@ -219,7 +221,7 @@ void Allocator<T>::freeRegionsStartingAt(typename Allocator<T>::Region* region)
{
while (region) {
Region* nextRegion = region->m_next;
- fastAlignedFree(region->m_allocation);
+ region->m_allocation.deallocate();
region = nextRegion;
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGAnalysis.h b/Source/JavaScriptCore/dfg/DFGAnalysis.h
index 0df93d1d1..1a49a8f51 100644
--- a/Source/JavaScriptCore/dfg/DFGAnalysis.h
+++ b/Source/JavaScriptCore/dfg/DFGAnalysis.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGAnalysis_h
#define DFGAnalysis_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
@@ -53,8 +55,6 @@ public:
{
if (m_valid)
return;
- // It's best to run dependent analyses from this method.
- static_cast<T*>(this)->computeDependencies(graph);
// Set to true early, since the analysis may choose to call its own methods in
// compute() and it may want to ASSERT() validity in those methods.
m_valid = true;
@@ -63,12 +63,6 @@ public:
bool isValid() const { return m_valid; }
- // Override this to compute any dependent analyses. See
- // NaturalLoops::computeDependencies(Graph&) for an example. This isn't strictly necessary but
- // it makes debug dumps in cases of error work a bit better because this analysis wouldn't yet
- // be pretending to be valid.
- void computeDependencies(Graph&) { }
-
private:
bool m_valid;
};
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentPosition.h b/Source/JavaScriptCore/dfg/DFGArgumentPosition.h
index a6983a735..b4e4ade15 100644
--- a/Source/JavaScriptCore/dfg/DFGArgumentPosition.h
+++ b/Source/JavaScriptCore/dfg/DFGArgumentPosition.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -46,9 +46,6 @@ public:
void addVariable(VariableAccessData* variable)
{
m_variables.append(variable);
-
- // We may set this early. Merging it here saves us time in prediction propagation.
- variable->mergeShouldNeverUnbox(m_shouldNeverUnbox);
}
VariableAccessData* someVariable() const
@@ -67,7 +64,7 @@ public:
bool mergeShouldNeverUnbox(bool shouldNeverUnbox)
{
- return checkAndSet(m_shouldNeverUnbox, m_shouldNeverUnbox || shouldNeverUnbox);
+ return checkAndSet(m_shouldNeverUnbox, m_shouldNeverUnbox | shouldNeverUnbox);
}
bool mergeArgumentPredictionAwareness()
@@ -96,7 +93,7 @@ public:
bool changed = false;
for (unsigned i = 0; i < m_variables.size(); ++i) {
VariableAccessData* variable = m_variables[i]->find();
- changed |= checkAndSet(m_isProfitableToUnbox, m_isProfitableToUnbox || variable->isProfitableToUnbox());
+ changed |= checkAndSet(m_isProfitableToUnbox, m_isProfitableToUnbox | variable->isProfitableToUnbox());
}
if (!changed)
return false;
@@ -126,7 +123,10 @@ public:
if (i)
out.print(" ");
- out.print(operand, "(", VariableAccessDataDump(*graph, variable), ")");
+ if (operand.isArgument())
+ out.print("arg", operand.toArgument(), "(", VariableAccessDataDump(*graph, variable), ")");
+ else
+ out.print("r", operand.toLocal(), "(", VariableAccessDataDump(*graph, variable), ")");
}
out.print("\n");
}
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp
deleted file mode 100644
index a9ad48711..000000000
--- a/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp
+++ /dev/null
@@ -1,625 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGArgumentsEliminationPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "BytecodeLivenessAnalysisInlines.h"
-#include "DFGArgumentsUtilities.h"
-#include "DFGBasicBlockInlines.h"
-#include "DFGBlockMapInlines.h"
-#include "DFGClobberize.h"
-#include "DFGCombinedLiveness.h"
-#include "DFGForAllKills.h"
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGLivenessAnalysisPhase.h"
-#include "DFGOSRAvailabilityAnalysisPhase.h"
-#include "DFGPhase.h"
-#include "JSCInlines.h"
-#include <wtf/HashMap.h>
-#include <wtf/HashSet.h>
-#include <wtf/ListDump.h>
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-bool verbose = false;
-
-class ArgumentsEliminationPhase : public Phase {
-public:
- ArgumentsEliminationPhase(Graph& graph)
- : Phase(graph, "arguments elimination")
- {
- }
-
- bool run()
- {
- // For now this phase only works on SSA. This could be changed; we could have a block-local
- // version over LoadStore.
- DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
-
- if (verbose) {
- dataLog("Graph before arguments elimination:\n");
- m_graph.dump();
- }
-
- identifyCandidates();
- if (m_candidates.isEmpty())
- return false;
-
- eliminateCandidatesThatEscape();
- if (m_candidates.isEmpty())
- return false;
-
- eliminateCandidatesThatInterfere();
- if (m_candidates.isEmpty())
- return false;
-
- transform();
-
- return true;
- }
-
-private:
- // Just finds nodes that we know how to work with.
- void identifyCandidates()
- {
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- for (Node* node : *block) {
- switch (node->op()) {
- case CreateDirectArguments:
- case CreateClonedArguments:
- m_candidates.add(node);
- break;
-
- case CreateScopedArguments:
- // FIXME: We could handle this if it wasn't for the fact that scoped arguments are
- // always stored into the activation.
- // https://bugs.webkit.org/show_bug.cgi?id=143072 and
- // https://bugs.webkit.org/show_bug.cgi?id=143073
- break;
-
- default:
- break;
- }
- }
- }
-
- if (verbose)
- dataLog("Candidates: ", listDump(m_candidates), "\n");
- }
-
- // Look for escaping sites, and remove from the candidates set if we see an escape.
- void eliminateCandidatesThatEscape()
- {
- auto escape = [&] (Edge edge) {
- if (!edge)
- return;
- m_candidates.remove(edge.node());
- };
-
- auto escapeBasedOnArrayMode = [&] (ArrayMode mode, Edge edge) {
- switch (mode.type()) {
- case Array::DirectArguments:
- if (edge->op() != CreateDirectArguments)
- escape(edge);
- break;
-
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous:
- if (edge->op() != CreateClonedArguments)
- escape(edge);
- break;
-
- default:
- escape(edge);
- break;
- }
- };
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- for (Node* node : *block) {
- switch (node->op()) {
- case GetFromArguments:
- DFG_ASSERT(m_graph, node, node->child1()->op() == CreateDirectArguments);
- break;
-
- case GetByVal:
- escapeBasedOnArrayMode(node->arrayMode(), node->child1());
- escape(node->child2());
- escape(node->child3());
- break;
-
- case GetArrayLength:
- escapeBasedOnArrayMode(node->arrayMode(), node->child1());
- escape(node->child2());
- break;
-
- case LoadVarargs:
- break;
-
- case CallVarargs:
- case ConstructVarargs:
- escape(node->child1());
- escape(node->child3());
- break;
-
- case Check:
- m_graph.doToChildren(
- node,
- [&] (Edge edge) {
- if (edge.willNotHaveCheck())
- return;
-
- if (alreadyChecked(edge.useKind(), SpecObject))
- return;
-
- escape(edge);
- });
- break;
-
- case MovHint:
- case PutHint:
- break;
-
- case GetButterfly:
- // This barely works. The danger is that the GetButterfly is used by something that
- // does something escaping to a candidate. Fortunately, the only butterfly-using ops
- // that we exempt here also use the candidate directly. If there ever was a
- // butterfly-using op that we wanted to exempt, then we'd have to look at the
- // butterfly's child and check if it's a candidate.
- break;
-
- case CheckArray:
- escapeBasedOnArrayMode(node->arrayMode(), node->child1());
- break;
-
- // FIXME: For cloned arguments, we'd like to allow GetByOffset on length to not be
- // an escape.
- // https://bugs.webkit.org/show_bug.cgi?id=143074
-
- // FIXME: We should be able to handle GetById/GetByOffset on callee.
- // https://bugs.webkit.org/show_bug.cgi?id=143075
-
- default:
- m_graph.doToChildren(node, escape);
- break;
- }
- }
- }
-
- if (verbose)
- dataLog("After escape analysis: ", listDump(m_candidates), "\n");
- }
-
- // Anywhere that a candidate is live (in bytecode or in DFG), check if there is a chance of
- // interference between the stack area that the arguments object copies from and the arguments
- // object's payload. Conservatively this means that the stack region doesn't get stored to.
- void eliminateCandidatesThatInterfere()
- {
- performLivenessAnalysis(m_graph);
- performOSRAvailabilityAnalysis(m_graph);
- m_graph.initializeNodeOwners();
- CombinedLiveness combinedLiveness(m_graph);
-
- BlockMap<Operands<bool>> clobberedByBlock(m_graph);
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- Operands<bool>& clobberedByThisBlock = clobberedByBlock[block];
- clobberedByThisBlock = Operands<bool>(OperandsLike, m_graph.block(0)->variablesAtHead);
- for (Node* node : *block) {
- clobberize(
- m_graph, node, NoOpClobberize(),
- [&] (AbstractHeap heap) {
- if (heap.kind() != Stack) {
- ASSERT(!heap.overlaps(Stack));
- return;
- }
- ASSERT(!heap.payload().isTop());
- VirtualRegister reg(heap.payload().value32());
- clobberedByThisBlock.operand(reg) = true;
- },
- NoOpClobberize());
- }
- }
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- // Stop if we've already removed all candidates.
- if (m_candidates.isEmpty())
- return;
-
- // Ignore blocks that don't write to the stack.
- bool writesToStack = false;
- for (unsigned i = clobberedByBlock[block].size(); i--;) {
- if (clobberedByBlock[block][i]) {
- writesToStack = true;
- break;
- }
- }
- if (!writesToStack)
- continue;
-
- forAllKillsInBlock(
- m_graph, combinedLiveness, block,
- [&] (unsigned nodeIndex, Node* candidate) {
- if (!m_candidates.contains(candidate))
- return;
-
- // Check if this block has any clobbers that affect this candidate. This is a fairly
- // fast check.
- bool isClobberedByBlock = false;
- Operands<bool>& clobberedByThisBlock = clobberedByBlock[block];
-
- if (InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame) {
- if (inlineCallFrame->isVarargs()) {
- isClobberedByBlock |= clobberedByThisBlock.operand(
- inlineCallFrame->stackOffset + JSStack::ArgumentCount);
- }
-
- if (!isClobberedByBlock || inlineCallFrame->isClosureCall) {
- isClobberedByBlock |= clobberedByThisBlock.operand(
- inlineCallFrame->stackOffset + JSStack::Callee);
- }
-
- if (!isClobberedByBlock) {
- for (unsigned i = 0; i < inlineCallFrame->arguments.size() - 1; ++i) {
- VirtualRegister reg =
- VirtualRegister(inlineCallFrame->stackOffset) +
- CallFrame::argumentOffset(i);
- if (clobberedByThisBlock.operand(reg)) {
- isClobberedByBlock = true;
- break;
- }
- }
- }
- } else {
- // We don't include the ArgumentCount or Callee in this case because we can be
- // damn sure that this won't be clobbered.
- for (unsigned i = 1; i < static_cast<unsigned>(codeBlock()->numParameters()); ++i) {
- if (clobberedByThisBlock.argument(i)) {
- isClobberedByBlock = true;
- break;
- }
- }
- }
-
- if (!isClobberedByBlock)
- return;
-
- // Check if we can immediately eliminate this candidate. If the block has a clobber
- // for this arguments allocation, and we'd have to examine every node in the block,
- // then we can just eliminate the candidate.
- if (nodeIndex == block->size() && candidate->owner != block) {
- m_candidates.remove(candidate);
- return;
- }
-
- // This loop considers all nodes up to the nodeIndex, excluding the nodeIndex.
- while (nodeIndex--) {
- Node* node = block->at(nodeIndex);
- if (node == candidate)
- break;
-
- bool found = false;
- clobberize(
- m_graph, node, NoOpClobberize(),
- [&] (AbstractHeap heap) {
- if (heap.kind() == Stack && !heap.payload().isTop()) {
- if (argumentsInvolveStackSlot(candidate, VirtualRegister(heap.payload().value32())))
- found = true;
- return;
- }
- if (heap.overlaps(Stack))
- found = true;
- },
- NoOpClobberize());
-
- if (found) {
- m_candidates.remove(candidate);
- return;
- }
- }
- });
- }
-
- // Q: How do we handle OSR exit with a live PhantomArguments at a point where the inline call
- // frame is dead? A: Naively we could say that PhantomArguments must escape the stack slots. But
- // that would break PutStack sinking, which in turn would break object allocation sinking, in
- // cases where we have a varargs call to an otherwise pure method. So, we need something smarter.
- // For the outermost arguments, we just have a PhantomArguments that magically knows that it
- // should load the arguments from the call frame. For the inline arguments, we have the heap map
- // in the availabiltiy map track each possible inline argument as a promoted heap location. If the
- // PutStacks for those arguments aren't sunk, those heap locations will map to very trivial
- // availabilities (they will be flush availabilities). But if sinking happens then those
- // availabilities may become whatever. OSR exit should be able to handle this quite naturally,
- // since those availabilities speak of the stack before the optimizing compiler stack frame is
- // torn down.
-
- if (verbose)
- dataLog("After interference analysis: ", listDump(m_candidates), "\n");
- }
-
- void transform()
- {
- InsertionSet insertionSet(m_graph);
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
-
- auto getArrayLength = [&] (Node* candidate) -> Node* {
- return emitCodeToGetArgumentsArrayLength(
- insertionSet, candidate, nodeIndex, node->origin);
- };
-
- switch (node->op()) {
- case CreateDirectArguments:
- if (!m_candidates.contains(node))
- break;
-
- node->setOpAndDefaultFlags(PhantomDirectArguments);
- break;
-
- case CreateClonedArguments:
- if (!m_candidates.contains(node))
- break;
-
- node->setOpAndDefaultFlags(PhantomClonedArguments);
- break;
-
- case GetFromArguments: {
- Node* candidate = node->child1().node();
- if (!m_candidates.contains(candidate))
- break;
-
- DFG_ASSERT(
- m_graph, node,
- node->child1()->op() == CreateDirectArguments
- || node->child1()->op() == PhantomDirectArguments);
- VirtualRegister reg =
- virtualRegisterForArgument(node->capturedArgumentsOffset().offset() + 1) +
- node->origin.semantic.stackOffset();
- StackAccessData* data = m_graph.m_stackAccessData.add(reg, FlushedJSValue);
- node->convertToGetStack(data);
- break;
- }
-
- case GetArrayLength: {
- Node* candidate = node->child1().node();
- if (!m_candidates.contains(candidate))
- break;
-
- // Meh, this is kind of hackish - we use an Identity so that we can reuse the
- // getArrayLength() helper.
- node->convertToIdentityOn(getArrayLength(candidate));
- break;
- }
-
- case GetByVal: {
- // FIXME: For ClonedArguments, we would have already done a separate bounds check.
- // This code will cause us to have two bounds checks - the original one that we
- // already factored out in SSALoweringPhase, and the new one we insert here, which is
- // often implicitly part of GetMyArgumentByVal. LLVM will probably eliminate the
- // second bounds check, but still - that's just silly.
- // https://bugs.webkit.org/show_bug.cgi?id=143076
-
- Node* candidate = node->child1().node();
- if (!m_candidates.contains(candidate))
- break;
-
- Node* result = nullptr;
- if (node->child2()->isInt32Constant()) {
- unsigned index = node->child2()->asUInt32();
- InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
-
- bool safeToGetStack;
- if (inlineCallFrame)
- safeToGetStack = index < inlineCallFrame->arguments.size() - 1;
- else {
- safeToGetStack =
- index < static_cast<unsigned>(codeBlock()->numParameters()) - 1;
- }
- if (safeToGetStack) {
- StackAccessData* data;
- VirtualRegister arg = virtualRegisterForArgument(index + 1);
- if (inlineCallFrame)
- arg += inlineCallFrame->stackOffset;
- data = m_graph.m_stackAccessData.add(arg, FlushedJSValue);
-
- if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
- insertionSet.insertNode(
- nodeIndex, SpecNone, CheckInBounds, node->origin,
- node->child2(), Edge(getArrayLength(candidate), Int32Use));
- }
-
- result = insertionSet.insertNode(
- nodeIndex, node->prediction(), GetStack, node->origin, OpInfo(data));
- }
- }
-
- if (!result) {
- result = insertionSet.insertNode(
- nodeIndex, node->prediction(), GetMyArgumentByVal, node->origin,
- node->child1(), node->child2());
- }
-
- // Need to do this because we may have a data format conversion here.
- node->convertToIdentityOn(result);
- break;
- }
-
- case LoadVarargs: {
- Node* candidate = node->child1().node();
- if (!m_candidates.contains(candidate))
- break;
-
- LoadVarargsData* varargsData = node->loadVarargsData();
- InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
- if (inlineCallFrame
- && !inlineCallFrame->isVarargs()
- && inlineCallFrame->arguments.size() - varargsData->offset <= varargsData->limit) {
- Node* argumentCount = insertionSet.insertConstant(
- nodeIndex, node->origin,
- jsNumber(inlineCallFrame->arguments.size() - varargsData->offset));
- insertionSet.insertNode(
- nodeIndex, SpecNone, MovHint, node->origin,
- OpInfo(varargsData->count.offset()), Edge(argumentCount));
- insertionSet.insertNode(
- nodeIndex, SpecNone, PutStack, node->origin,
- OpInfo(m_graph.m_stackAccessData.add(varargsData->count, FlushedInt32)),
- Edge(argumentCount, Int32Use));
-
- DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum);
- // Define our limit to not include "this", since that's a bit easier to reason about.
- unsigned limit = varargsData->limit - 1;
- Node* undefined = nullptr;
- for (unsigned storeIndex = 0; storeIndex < limit; ++storeIndex) {
- // First determine if we have an element we can load, and load it if
- // possible.
-
- unsigned loadIndex = storeIndex + varargsData->offset;
-
- Node* value;
- if (loadIndex + 1 < inlineCallFrame->arguments.size()) {
- VirtualRegister reg =
- virtualRegisterForArgument(loadIndex + 1) +
- inlineCallFrame->stackOffset;
- StackAccessData* data = m_graph.m_stackAccessData.add(
- reg, FlushedJSValue);
-
- value = insertionSet.insertNode(
- nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data));
- } else {
- // FIXME: We shouldn't have to store anything if
- // storeIndex >= varargsData->mandatoryMinimum, but we will still
- // have GetStacks in that range. So if we don't do the stores, we'll
- // have degenerate IR: we'll have GetStacks of something that didn't
- // have PutStacks.
- // https://bugs.webkit.org/show_bug.cgi?id=147434
-
- if (!undefined) {
- undefined = insertionSet.insertConstant(
- nodeIndex, node->origin, jsUndefined());
- }
- value = undefined;
- }
-
- // Now that we have a value, store it.
-
- VirtualRegister reg = varargsData->start + storeIndex;
- StackAccessData* data =
- m_graph.m_stackAccessData.add(reg, FlushedJSValue);
-
- insertionSet.insertNode(
- nodeIndex, SpecNone, MovHint, node->origin, OpInfo(reg.offset()),
- Edge(value));
- insertionSet.insertNode(
- nodeIndex, SpecNone, PutStack, node->origin, OpInfo(data),
- Edge(value));
- }
-
- node->remove();
- break;
- }
-
- node->setOpAndDefaultFlags(ForwardVarargs);
- break;
- }
-
- case CallVarargs:
- case ConstructVarargs: {
- Node* candidate = node->child2().node();
- if (!m_candidates.contains(candidate))
- break;
-
- CallVarargsData* varargsData = node->callVarargsData();
- InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
- if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
- Vector<Node*> arguments;
- for (unsigned i = 1 + varargsData->firstVarArgOffset; i < inlineCallFrame->arguments.size(); ++i) {
- StackAccessData* data = m_graph.m_stackAccessData.add(
- virtualRegisterForArgument(i) + inlineCallFrame->stackOffset,
- FlushedJSValue);
-
- Node* value = insertionSet.insertNode(
- nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data));
-
- arguments.append(value);
- }
-
- unsigned firstChild = m_graph.m_varArgChildren.size();
- m_graph.m_varArgChildren.append(node->child1());
- m_graph.m_varArgChildren.append(node->child3());
- for (Node* argument : arguments)
- m_graph.m_varArgChildren.append(Edge(argument));
- node->setOpAndDefaultFlags(
- node->op() == CallVarargs ? Call : Construct);
- node->children = AdjacencyList(
- AdjacencyList::Variable,
- firstChild, m_graph.m_varArgChildren.size() - firstChild);
- break;
- }
-
- node->setOpAndDefaultFlags(
- node->op() == CallVarargs ? CallForwardVarargs : ConstructForwardVarargs);
- break;
- }
-
- case CheckArray:
- case GetButterfly: {
- if (!m_candidates.contains(node->child1().node()))
- break;
- node->remove();
- break;
- }
-
- default:
- break;
- }
- }
-
- insertionSet.execute(block);
- }
- }
-
- HashSet<Node*> m_candidates;
-};
-
-} // anonymous namespace
-
-bool performArgumentsElimination(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Arguments Elimination Phase");
- return runPhase<ArgumentsEliminationPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp
new file mode 100644
index 000000000..936603150
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp
@@ -0,0 +1,798 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGArgumentsSimplificationPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGBasicBlock.h"
+#include "DFGGraph.h"
+#include "DFGInsertionSet.h"
+#include "DFGPhase.h"
+#include "DFGValidate.h"
+#include "DFGVariableAccessDataDump.h"
+#include "Operations.h"
+#include <wtf/HashSet.h>
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace DFG {
+
+namespace {
+
+struct ArgumentsAliasingData {
+ InlineCallFrame* callContext;
+ bool callContextSet;
+ bool multipleCallContexts;
+
+ bool assignedFromArguments;
+ bool assignedFromManyThings;
+
+ bool escapes;
+
+ ArgumentsAliasingData()
+ : callContext(0)
+ , callContextSet(false)
+ , multipleCallContexts(false)
+ , assignedFromArguments(false)
+ , assignedFromManyThings(false)
+ , escapes(false)
+ {
+ }
+
+ void mergeCallContext(InlineCallFrame* newCallContext)
+ {
+ if (multipleCallContexts)
+ return;
+
+ if (!callContextSet) {
+ callContext = newCallContext;
+ callContextSet = true;
+ return;
+ }
+
+ if (callContext == newCallContext)
+ return;
+
+ multipleCallContexts = true;
+ }
+
+ bool callContextIsValid()
+ {
+ return callContextSet && !multipleCallContexts;
+ }
+
+ void mergeArgumentsAssignment()
+ {
+ assignedFromArguments = true;
+ }
+
+ void mergeNonArgumentsAssignment()
+ {
+ assignedFromManyThings = true;
+ }
+
+ bool argumentsAssignmentIsValid()
+ {
+ return assignedFromArguments && !assignedFromManyThings;
+ }
+
+ bool isValid()
+ {
+ return callContextIsValid() && argumentsAssignmentIsValid() && !escapes;
+ }
+};
+
+} // end anonymous namespace
+
+class ArgumentsSimplificationPhase : public Phase {
+public:
+ ArgumentsSimplificationPhase(Graph& graph)
+ : Phase(graph, "arguments simplification")
+ {
+ }
+
+ bool run()
+ {
+ if (!m_graph.m_hasArguments)
+ return false;
+
+ bool changed = false;
+
+ // Record which arguments are known to escape no matter what.
+ for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter)
+ pruneObviousArgumentCreations(*iter);
+ pruneObviousArgumentCreations(0); // the machine call frame.
+
+ // Create data for variable access datas that we will want to analyze.
+ for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
+ VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i];
+ if (!variableAccessData->isRoot())
+ continue;
+ if (variableAccessData->isCaptured())
+ continue;
+ m_argumentsAliasing.add(variableAccessData, ArgumentsAliasingData());
+ }
+
+ // Figure out which variables are live, using a conservative approximation of
+ // liveness.
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ Node* node = block->at(indexInBlock);
+ switch (node->op()) {
+ case GetLocal:
+ case Flush:
+ case PhantomLocal:
+ m_isLive.add(node->variableAccessData());
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // Figure out which variables alias the arguments and nothing else, and are
+ // used only for GetByVal and GetArrayLength accesses. At the same time,
+ // identify uses of CreateArguments that are not consistent with the arguments
+ // being aliased only to variables that satisfy these constraints.
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ Node* node = block->at(indexInBlock);
+ switch (node->op()) {
+ case CreateArguments: {
+ // Ignore this op. If we see a lone CreateArguments then we want to
+ // completely ignore it because:
+ // 1) The default would be to see that the child is a GetLocal on the
+ // arguments register and conclude that we have an arguments escape.
+ // 2) The fact that a CreateArguments exists does not mean that it
+ // will continue to exist after we're done with this phase. As far
+ // as this phase is concerned, a CreateArguments only "exists" if it
+ // is used in a manner that necessitates its existence.
+ break;
+ }
+
+ case TearOffArguments: {
+ // Ignore arguments tear off, because it's only relevant if we actually
+ // need to create the arguments.
+ break;
+ }
+
+ case SetLocal: {
+ Node* source = node->child1().node();
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ VirtualRegister argumentsRegister =
+ m_graph.uncheckedArgumentsRegisterFor(node->codeOrigin);
+ if (source->op() != CreateArguments && source->op() != PhantomArguments) {
+ // Make sure that the source of the SetLocal knows that if it's
+ // a variable that we think is aliased to the arguments, then it
+ // may escape at this point. In future, we could track transitive
+ // aliasing. But not yet.
+ observeBadArgumentsUse(source);
+
+ // If this is an assignment to the arguments register, then
+ // pretend as if the arguments were created. We don't want to
+ // optimize code that explicitly assigns to the arguments,
+ // because that seems too ugly.
+
+ // But, before getting rid of CreateArguments, we will have
+ // an assignment to the arguments registers with JSValue().
+ // That's because CSE will refuse to get rid of the
+ // init_lazy_reg since it treats CreateArguments as reading
+ // local variables. That could be fixed, but it's easier to
+ // work around this here.
+ if (source->op() == JSConstant
+ && !source->valueOfJSConstant(codeBlock()))
+ break;
+
+ // If the variable is totally dead, then ignore it.
+ if (!m_isLive.contains(variableAccessData))
+ break;
+
+ if (argumentsRegister.isValid()
+ && (variableAccessData->local() == argumentsRegister
+ || variableAccessData->local() == unmodifiedArgumentsRegister(argumentsRegister))) {
+ m_createsArguments.add(node->codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ if (variableAccessData->isCaptured())
+ break;
+
+ // Make sure that if it's a variable that we think is aliased to
+ // the arguments, that we know that it might actually not be.
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->value;
+ data.mergeNonArgumentsAssignment();
+ data.mergeCallContext(node->codeOrigin.inlineCallFrame);
+ break;
+ }
+ if (argumentsRegister.isValid()
+ && (variableAccessData->local() == argumentsRegister
+ || variableAccessData->local() == unmodifiedArgumentsRegister(argumentsRegister))) {
+ if (node->codeOrigin.inlineCallFrame == source->codeOrigin.inlineCallFrame)
+ break;
+ m_createsArguments.add(source->codeOrigin.inlineCallFrame);
+ break;
+ }
+ if (variableAccessData->isCaptured()) {
+ m_createsArguments.add(source->codeOrigin.inlineCallFrame);
+ break;
+ }
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->value;
+ data.mergeArgumentsAssignment();
+ // This ensures that the variable's uses are in the same context as
+ // the arguments it is aliasing.
+ data.mergeCallContext(node->codeOrigin.inlineCallFrame);
+ data.mergeCallContext(source->codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ case GetLocal:
+ case Phi: /* FIXME: https://bugs.webkit.org/show_bug.cgi?id=108555 */ {
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->value;
+ data.mergeCallContext(node->codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ case Flush: {
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->value;
+ data.mergeCallContext(node->codeOrigin.inlineCallFrame);
+
+ // If a variable is used in a flush then by definition it escapes.
+ data.escapes = true;
+ break;
+ }
+
+ case SetArgument: {
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->value;
+ data.mergeNonArgumentsAssignment();
+ data.mergeCallContext(node->codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ case GetByVal: {
+ if (node->arrayMode().type() != Array::Arguments) {
+ observeBadArgumentsUses(node);
+ break;
+ }
+
+ // That's so awful and pretty much impossible since it would
+ // imply that the arguments were predicted integer, but it's
+ // good to be defensive and thorough.
+ observeBadArgumentsUse(node->child2().node());
+ observeProperArgumentsUse(node, node->child1());
+ break;
+ }
+
+ case GetArrayLength: {
+ if (node->arrayMode().type() != Array::Arguments) {
+ observeBadArgumentsUses(node);
+ break;
+ }
+
+ observeProperArgumentsUse(node, node->child1());
+ break;
+ }
+
+ case Phantom:
+ // We don't care about phantom uses, since phantom uses are all about
+ // just keeping things alive for OSR exit. If something - like the
+ // CreateArguments - is just being kept alive, then this transformation
+ // will not break this, since the Phantom will now just keep alive a
+ // PhantomArguments and OSR exit will still do the right things.
+ break;
+
+ case CheckStructure:
+ case StructureTransitionWatchpoint:
+ case CheckArray:
+ // We don't care about these because if we get uses of the relevant
+ // variable then we can safely get rid of these, too. This of course
+ // relies on there not being any information transferred by the CFA
+ // from a CheckStructure on one variable to the information about the
+ // structures of another variable.
+ break;
+
+ case MovHint:
+ // We don't care about MovHints at all, since they represent what happens
+ // in bytecode. We rematerialize arguments objects on OSR exit anyway.
+ break;
+
+ default:
+ observeBadArgumentsUses(node);
+ break;
+ }
+ }
+ }
+
+ // Now we know which variables are aliased to arguments. But if any of them are
+ // found to have escaped, or were otherwise invalidated, then we need to mark
+ // the arguments as requiring creation. This is a property of SetLocals to
+ // variables that are neither the correct arguments register nor are marked as
+ // being arguments-aliased.
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ Node* node = block->at(indexInBlock);
+ if (node->op() != SetLocal)
+ continue;
+ Node* source = node->child1().node();
+ if (source->op() != CreateArguments)
+ continue;
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (variableAccessData->isCaptured()) {
+ // The captured case would have already been taken care of in the
+ // previous pass.
+ continue;
+ }
+
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->value;
+ if (data.isValid())
+ continue;
+
+ m_createsArguments.add(source->codeOrigin.inlineCallFrame);
+ }
+ }
+
+ InsertionSet insertionSet(m_graph);
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); indexInBlock++) {
+ Node* node = block->at(indexInBlock);
+ switch (node->op()) {
+ case SetLocal: {
+ Node* source = node->child1().node();
+ if (source->op() != CreateArguments)
+ break;
+
+ if (m_createsArguments.contains(source->codeOrigin.inlineCallFrame))
+ break;
+
+ VariableAccessData* variableAccessData = node->variableAccessData();
+
+ if (m_graph.argumentsRegisterFor(node->codeOrigin) == variableAccessData->local()
+ || unmodifiedArgumentsRegister(m_graph.argumentsRegisterFor(node->codeOrigin)) == variableAccessData->local())
+ break;
+
+ if (variableAccessData->mergeIsArgumentsAlias(true)) {
+ changed = true;
+
+ // Make sure that the variable knows, that it may now hold non-cell values.
+ variableAccessData->predict(SpecEmpty);
+ }
+
+ // Make sure that the SetLocal doesn't check that the input is a Cell.
+ if (node->child1().useKind() != UntypedUse) {
+ node->child1().setUseKind(UntypedUse);
+ changed = true;
+ }
+ break;
+ }
+
+ case Flush: {
+ VariableAccessData* variableAccessData = node->variableAccessData();
+
+ if (variableAccessData->isCaptured()
+ || !m_argumentsAliasing.find(variableAccessData)->value.isValid()
+ || m_createsArguments.contains(node->codeOrigin.inlineCallFrame))
+ break;
+
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ case Phantom: {
+ // It's highly likely that we will have a Phantom referencing either
+ // CreateArguments, or a local op for the arguments register, or a
+ // local op for an arguments-aliased variable. In any of those cases,
+ // we should remove the phantom reference, since:
+ // 1) Phantoms only exist to aid OSR exit. But arguments simplification
+ // has its own OSR exit story, which is to inform OSR exit to reify
+ // the arguments as necessary.
+ // 2) The Phantom may keep the CreateArguments node alive, which is
+ // precisely what we don't want.
+ for (unsigned i = 0; i < AdjacencyList::Size; ++i)
+ detypeArgumentsReferencingPhantomChild(node, i);
+ break;
+ }
+
+ case CheckStructure:
+ case StructureTransitionWatchpoint:
+ case CheckArray: {
+ // We can just get rid of this node, if it references a phantom argument.
+ if (!isOKToOptimize(node->child1().node()))
+ break;
+ node->convertToPhantom();
+ break;
+ }
+
+ case GetByVal: {
+ if (node->arrayMode().type() != Array::Arguments)
+ break;
+
+ // This can be simplified to GetMyArgumentByVal if we know that
+ // it satisfies either condition (1) or (2):
+ // 1) Its first child is a valid ArgumentsAliasingData and the
+ // InlineCallFrame* is not marked as creating arguments.
+ // 2) Its first child is CreateArguments and its InlineCallFrame*
+ // is not marked as creating arguments.
+
+ if (!isOKToOptimize(node->child1().node()))
+ break;
+
+ insertionSet.insertNode(
+ indexInBlock, SpecNone, Phantom, node->codeOrigin, node->child1());
+
+ node->child1() = node->child2();
+ node->child2() = Edge();
+ node->setOpAndDefaultFlags(GetMyArgumentByVal);
+ changed = true;
+ --indexInBlock; // Force reconsideration of this op now that it's a GetMyArgumentByVal.
+ break;
+ }
+
+ case GetArrayLength: {
+ if (node->arrayMode().type() != Array::Arguments)
+ break;
+
+ if (!isOKToOptimize(node->child1().node()))
+ break;
+
+ insertionSet.insertNode(
+ indexInBlock, SpecNone, Phantom, node->codeOrigin, node->child1());
+
+ node->child1() = Edge();
+ node->setOpAndDefaultFlags(GetMyArgumentsLength);
+ changed = true;
+ --indexInBlock; // Force reconsideration of this op now that it's a GetMyArgumentsLength.
+ break;
+ }
+
+ case GetMyArgumentsLength:
+ case GetMyArgumentsLengthSafe: {
+ if (m_createsArguments.contains(node->codeOrigin.inlineCallFrame)) {
+ ASSERT(node->op() == GetMyArgumentsLengthSafe);
+ break;
+ }
+ if (node->op() == GetMyArgumentsLengthSafe) {
+ node->setOp(GetMyArgumentsLength);
+ changed = true;
+ }
+
+ CodeOrigin codeOrigin = node->codeOrigin;
+ if (!codeOrigin.inlineCallFrame)
+ break;
+
+ // We know exactly what this will return. But only after we have checked
+ // that nobody has escaped our arguments.
+ insertionSet.insertNode(
+ indexInBlock, SpecNone, CheckArgumentsNotCreated, codeOrigin);
+
+ m_graph.convertToConstant(
+ node, jsNumber(codeOrigin.inlineCallFrame->arguments.size() - 1));
+ changed = true;
+ break;
+ }
+
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValSafe: {
+ if (m_createsArguments.contains(node->codeOrigin.inlineCallFrame)) {
+ ASSERT(node->op() == GetMyArgumentByValSafe);
+ break;
+ }
+ if (node->op() == GetMyArgumentByValSafe) {
+ node->setOp(GetMyArgumentByVal);
+ changed = true;
+ }
+ if (!node->codeOrigin.inlineCallFrame)
+ break;
+ if (!node->child1()->hasConstant())
+ break;
+ JSValue value = node->child1()->valueOfJSConstant(codeBlock());
+ if (!value.isInt32())
+ break;
+ int32_t index = value.asInt32();
+ if (index < 0
+ || static_cast<size_t>(index + 1) >=
+ node->codeOrigin.inlineCallFrame->arguments.size())
+ break;
+
+ // We know which argument this is accessing. But only after we have checked
+ // that nobody has escaped our arguments. We also need to ensure that the
+ // index is kept alive. That's somewhat pointless since it's a constant, but
+ // it's important because this is one of those invariants that we like to
+ // have in the DFG. Note finally that we use the GetLocalUnlinked opcode
+ // here, since this is being done _after_ the prediction propagation phase
+ // has run - therefore it makes little sense to link the GetLocal operation
+ // into the VariableAccessData and Phi graphs.
+
+ CodeOrigin codeOrigin = node->codeOrigin;
+ AdjacencyList children = node->children;
+
+ node->convertToGetLocalUnlinked(
+ VirtualRegister(
+ node->codeOrigin.inlineCallFrame->stackOffset +
+ m_graph.baselineCodeBlockFor(node->codeOrigin)->argumentIndexAfterCapture(index)));
+
+ insertionSet.insertNode(
+ indexInBlock, SpecNone, CheckArgumentsNotCreated,
+ codeOrigin);
+ insertionSet.insertNode(
+ indexInBlock, SpecNone, Phantom, codeOrigin, children);
+
+ changed = true;
+ break;
+ }
+
+ case TearOffArguments: {
+ if (m_createsArguments.contains(node->codeOrigin.inlineCallFrame))
+ continue;
+
+ node->convertToPhantom();
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ insertionSet.execute(block);
+ }
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ Node* node = block->at(indexInBlock);
+ if (node->op() != CreateArguments)
+ continue;
+ // If this is a CreateArguments for an InlineCallFrame* that does
+ // not create arguments, then replace it with a PhantomArguments.
+ // PhantomArguments is a non-executing node that just indicates
+ // that the node should be reified as an arguments object on OSR
+ // exit.
+ if (m_createsArguments.contains(node->codeOrigin.inlineCallFrame))
+ continue;
+ insertionSet.insertNode(
+ indexInBlock, SpecNone, Phantom, node->codeOrigin, node->children);
+ node->setOpAndDefaultFlags(PhantomArguments);
+ node->children.reset();
+ changed = true;
+ }
+ insertionSet.execute(block);
+ }
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ Node* node = block->at(indexInBlock);
+ if (node->op() != Phantom)
+ continue;
+ for (unsigned i = 0; i < AdjacencyList::Size; ++i)
+ detypeArgumentsReferencingPhantomChild(node, i);
+ }
+ }
+
+ if (changed) {
+ m_graph.dethread();
+ m_graph.m_form = LoadStore;
+ }
+
+ return changed;
+ }
+
+private:
+ HashSet<InlineCallFrame*,
+ DefaultHash<InlineCallFrame*>::Hash,
+ NullableHashTraits<InlineCallFrame*>> m_createsArguments;
+ HashMap<VariableAccessData*, ArgumentsAliasingData,
+ DefaultHash<VariableAccessData*>::Hash,
+ NullableHashTraits<VariableAccessData*>> m_argumentsAliasing;
+ HashSet<VariableAccessData*> m_isLive;
+
+ void pruneObviousArgumentCreations(InlineCallFrame* inlineCallFrame)
+ {
+ ScriptExecutable* executable = m_graph.executableFor(inlineCallFrame);
+ if (m_graph.m_executablesWhoseArgumentsEscaped.contains(executable)
+ || executable->isStrictMode())
+ m_createsArguments.add(inlineCallFrame);
+ }
+
+ void observeBadArgumentsUse(Node* node)
+ {
+ if (!node)
+ return;
+
+ switch (node->op()) {
+ case CreateArguments: {
+ m_createsArguments.add(node->codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ case GetLocal: {
+ VirtualRegister argumentsRegister = m_graph.uncheckedArgumentsRegisterFor(node->codeOrigin);
+ if (argumentsRegister.isValid()
+ && (node->local() == argumentsRegister
+ || node->local() == unmodifiedArgumentsRegister(argumentsRegister))) {
+ m_createsArguments.add(node->codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+
+ ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->value;
+ data.escapes = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ void observeBadArgumentsUses(Node* node)
+ {
+ for (unsigned i = m_graph.numChildren(node); i--;)
+ observeBadArgumentsUse(m_graph.child(node, i).node());
+ }
+
+ void observeProperArgumentsUse(Node* node, Edge edge)
+ {
+ if (edge->op() != GetLocal) {
+ // When can this happen? At least two cases that I can think
+ // of:
+ //
+ // 1) Aliased use of arguments in the same basic block,
+ // like:
+ //
+ // var a = arguments;
+ // var x = arguments[i];
+ //
+ // 2) If we're accessing arguments we got from the heap!
+
+ if (edge->op() == CreateArguments
+ && node->codeOrigin.inlineCallFrame
+ != edge->codeOrigin.inlineCallFrame)
+ m_createsArguments.add(edge->codeOrigin.inlineCallFrame);
+
+ return;
+ }
+
+ VariableAccessData* variableAccessData = edge->variableAccessData();
+ if (edge->local() == m_graph.uncheckedArgumentsRegisterFor(edge->codeOrigin)
+ && node->codeOrigin.inlineCallFrame != edge->codeOrigin.inlineCallFrame) {
+ m_createsArguments.add(edge->codeOrigin.inlineCallFrame);
+ return;
+ }
+
+ if (variableAccessData->isCaptured())
+ return;
+
+ ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->value;
+ data.mergeCallContext(node->codeOrigin.inlineCallFrame);
+ }
+
+ bool isOKToOptimize(Node* source)
+ {
+ if (m_createsArguments.contains(source->codeOrigin.inlineCallFrame))
+ return false;
+
+ switch (source->op()) {
+ case GetLocal: {
+ VariableAccessData* variableAccessData = source->variableAccessData();
+ VirtualRegister argumentsRegister = m_graph.uncheckedArgumentsRegisterFor(source->codeOrigin);
+ if (!argumentsRegister.isValid())
+ break;
+ if (argumentsRegister == variableAccessData->local())
+ return true;
+ if (unmodifiedArgumentsRegister(argumentsRegister) == variableAccessData->local())
+ return true;
+ if (variableAccessData->isCaptured())
+ break;
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->value;
+ if (!data.isValid())
+ break;
+
+ return true;
+ }
+
+ case CreateArguments: {
+ return true;
+ }
+
+ default:
+ break;
+ }
+
+ return false;
+ }
+
+ void detypeArgumentsReferencingPhantomChild(Node* node, unsigned edgeIndex)
+ {
+ Edge edge = node->children.child(edgeIndex);
+ if (!edge)
+ return;
+
+ switch (edge->op()) {
+ case GetLocal: {
+ VariableAccessData* variableAccessData = edge->variableAccessData();
+ if (!variableAccessData->isArgumentsAlias())
+ break;
+ node->children.child(edgeIndex).setUseKind(UntypedUse);
+ break;
+ }
+
+ case PhantomArguments: {
+ node->children.child(edgeIndex).setUseKind(UntypedUse);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+};
+
+bool performArgumentsSimplification(Graph& graph)
+{
+ SamplingRegion samplingRegion("DFG Arguments Simplification Phase");
+ return runPhase<ArgumentsSimplificationPhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+
diff --git a/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.h b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h
index ece3747ee..e8a24019e 100644
--- a/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGVarargsForwardingPhase_h
-#define DFGVarargsForwardingPhase_h
+#ifndef DFGArgumentsSimplificationPhase_h
+#define DFGArgumentsSimplificationPhase_h
+
+#include <wtf/Platform.h>
#if ENABLE(DFG_JIT)
@@ -32,14 +34,16 @@ namespace JSC { namespace DFG {
class Graph;
-// Eliminates allocations of Arguments-class objects when they flow into CallVarargs, ConstructVarargs,
-// or LoadVarargs.
+// Simplifies reflective uses of the Arguments object:
+//
+// Inlined arguments.length -> constant
+// Inlined arguments[constant] -> GetLocalUnlinked
-bool performVarargsForwarding(Graph&);
+bool performArgumentsSimplification(Graph&);
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
-#endif // DFGVarargsForwardingPhase_h
+#endif // DFGArgumentsSimplificationPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp
deleted file mode 100644
index 5d512b1b6..000000000
--- a/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGArgumentsUtilities.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-bool argumentsInvolveStackSlot(InlineCallFrame* inlineCallFrame, VirtualRegister reg)
-{
- if (!inlineCallFrame)
- return (reg.isArgument() && reg.toArgument()) || reg.isHeader();
-
- if (inlineCallFrame->isClosureCall
- && reg == VirtualRegister(inlineCallFrame->stackOffset + JSStack::Callee))
- return true;
-
- if (inlineCallFrame->isVarargs()
- && reg == VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount))
- return true;
-
- unsigned numArguments = inlineCallFrame->arguments.size() - 1;
- VirtualRegister argumentStart =
- VirtualRegister(inlineCallFrame->stackOffset) + CallFrame::argumentOffset(0);
- return reg >= argumentStart && reg < argumentStart + numArguments;
-}
-
-bool argumentsInvolveStackSlot(Node* candidate, VirtualRegister reg)
-{
- return argumentsInvolveStackSlot(candidate->origin.semantic.inlineCallFrame, reg);
-}
-
-Node* emitCodeToGetArgumentsArrayLength(
- InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
-{
- Graph& graph = insertionSet.graph();
-
- DFG_ASSERT(
- graph, arguments,
- arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
- || arguments->op() == CreateClonedArguments || arguments->op() == PhantomDirectArguments
- || arguments->op() == PhantomClonedArguments);
-
- InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;
-
- if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
- return insertionSet.insertConstant(
- nodeIndex, origin, jsNumber(inlineCallFrame->arguments.size() - 1));
- }
-
- Node* argumentCount;
- if (!inlineCallFrame)
- argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32, GetArgumentCount, origin);
- else {
- VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount);
-
- argumentCount = insertionSet.insertNode(
- nodeIndex, SpecInt32, GetStack, origin,
- OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
- }
-
- return insertionSet.insertNode(
- nodeIndex, SpecInt32, ArithSub, origin, OpInfo(Arith::Unchecked),
- Edge(argumentCount, Int32Use),
- insertionSet.insertConstantForUse(
- nodeIndex, origin, jsNumber(1), Int32Use));
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h b/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h
deleted file mode 100644
index 82bfec30a..000000000
--- a/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGArgumentsUtilities_h
-#define DFGArgumentsUtilities_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-
-namespace JSC { namespace DFG {
-
-bool argumentsInvolveStackSlot(InlineCallFrame*, VirtualRegister);
-bool argumentsInvolveStackSlot(Node* candidate, VirtualRegister);
-
-Node* emitCodeToGetArgumentsArrayLength(
- InsertionSet&, Node* arguments, unsigned nodeIndex, NodeOrigin);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGArgumentsUtilities_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGArithMode.cpp b/Source/JavaScriptCore/dfg/DFGArithMode.cpp
index c1dfe7f80..cc9699a02 100644
--- a/Source/JavaScriptCore/dfg/DFGArithMode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGArithMode.cpp
@@ -28,7 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
#include <wtf/PrintStream.h>
namespace WTF {
diff --git a/Source/JavaScriptCore/dfg/DFGArithMode.h b/Source/JavaScriptCore/dfg/DFGArithMode.h
index 4e09ac3e9..073ed6aba 100644
--- a/Source/JavaScriptCore/dfg/DFGArithMode.h
+++ b/Source/JavaScriptCore/dfg/DFGArithMode.h
@@ -40,14 +40,6 @@ enum Mode {
CheckOverflowAndNegativeZero, // Check for both overflow and negative zero.
DoOverflow // Up-convert to the smallest type that soundly represents all possible results after input type speculation.
};
-
-// Define the type of operation the rounding operation will perform.
-enum class RoundingMode {
- Int32, // The round operation produces a integer and -0 is considered as 0.
- Int32WithNegativeZeroCheck, // The round operation produces a integer and checks for -0.
- Double // The round operation produce a double. The result can be -0, NaN or (+/-)Infinity.
-};
-
} // namespace Arith
inline bool doesOverflow(Arith::Mode mode)
@@ -105,41 +97,6 @@ inline bool shouldCheckNegativeZero(Arith::Mode mode)
return true;
}
-inline bool subsumes(Arith::Mode earlier, Arith::Mode later)
-{
- switch (earlier) {
- case Arith::CheckOverflow:
- switch (later) {
- case Arith::Unchecked:
- case Arith::CheckOverflow:
- return true;
- default:
- return false;
- }
- case Arith::CheckOverflowAndNegativeZero:
- switch (later) {
- case Arith::Unchecked:
- case Arith::CheckOverflow:
- case Arith::CheckOverflowAndNegativeZero:
- return true;
- default:
- return false;
- }
- default:
- return earlier == later;
- }
-}
-
-inline bool producesInteger(Arith::RoundingMode mode)
-{
- return mode == Arith::RoundingMode::Int32WithNegativeZeroCheck || mode == Arith::RoundingMode::Int32;
-}
-
-inline bool shouldCheckNegativeZero(Arith::RoundingMode mode)
-{
- return mode == Arith::RoundingMode::Int32WithNegativeZeroCheck;
-}
-
} } // namespace JSC::DFG
namespace WTF {
diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
index 75fd6d144..ef9b1c494 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +28,9 @@
#if ENABLE(DFG_JIT)
-#include "ArrayPrototype.h"
#include "DFGAbstractValue.h"
#include "DFGGraph.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -49,18 +48,18 @@ ArrayMode ArrayMode::fromObserved(const ConcurrentJITLocker& locker, ArrayProfil
return ArrayMode(Array::Unprofiled);
case asArrayModes(NonArray):
if (action == Array::Write && !profile->mayInterceptIndexedAccesses(locker))
- return ArrayMode(Array::SelectUsingArguments, nonArray, Array::OutOfBounds, Array::Convert);
- return ArrayMode(Array::SelectUsingPredictions, nonArray).withSpeculationFromProfile(locker, profile, makeSafe);
+ return ArrayMode(Array::Undecided, nonArray, Array::OutOfBounds, Array::Convert);
+ return ArrayMode(Array::SelectUsingPredictions, nonArray);
case asArrayModes(ArrayWithUndecided):
if (action == Array::Write)
- return ArrayMode(Array::SelectUsingArguments, Array::Array, Array::OutOfBounds, Array::Convert);
- return ArrayMode(Array::Undecided, Array::Array, Array::OutOfBounds, Array::AsIs).withProfile(locker, profile, makeSafe);
+ return ArrayMode(Array::Undecided, Array::Array, Array::OutOfBounds, Array::Convert);
+ return ArrayMode(Array::Generic);
case asArrayModes(NonArray) | asArrayModes(ArrayWithUndecided):
if (action == Array::Write && !profile->mayInterceptIndexedAccesses(locker))
- return ArrayMode(Array::SelectUsingArguments, Array::PossiblyArray, Array::OutOfBounds, Array::Convert);
- return ArrayMode(Array::SelectUsingPredictions).withSpeculationFromProfile(locker, profile, makeSafe);
+ return ArrayMode(Array::Undecided, Array::PossiblyArray, Array::OutOfBounds, Array::Convert);
+ return ArrayMode(Array::SelectUsingPredictions);
case asArrayModes(NonArrayWithInt32):
return ArrayMode(Array::Int32, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
@@ -98,28 +97,10 @@ ArrayMode ArrayMode::fromObserved(const ConcurrentJITLocker& locker, ArrayProfil
case asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage):
case asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage):
return ArrayMode(Array::SlowPutArrayStorage, Array::PossiblyArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Int8ArrayMode:
- return ArrayMode(Array::Int8Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Int16ArrayMode:
- return ArrayMode(Array::Int16Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Int32ArrayMode:
- return ArrayMode(Array::Int32Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Uint8ArrayMode:
- return ArrayMode(Array::Uint8Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Uint8ClampedArrayMode:
- return ArrayMode(Array::Uint8ClampedArray, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Uint16ArrayMode:
- return ArrayMode(Array::Uint16Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Uint32ArrayMode:
- return ArrayMode(Array::Uint32Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Float32ArrayMode:
- return ArrayMode(Array::Float32Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
- case Float64ArrayMode:
- return ArrayMode(Array::Float64Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe);
default:
if ((observed & asArrayModes(NonArray)) && profile->mayInterceptIndexedAccesses(locker))
- return ArrayMode(Array::SelectUsingPredictions).withSpeculationFromProfile(locker, profile, makeSafe);
+ return ArrayMode(Array::SelectUsingPredictions);
Array::Type type;
Array::Class arrayClass;
@@ -135,7 +116,7 @@ ArrayMode ArrayMode::fromObserved(const ConcurrentJITLocker& locker, ArrayProfil
else if (shouldUseInt32(observed))
type = Array::Int32;
else
- type = Array::SelectUsingArguments;
+ type = Array::Undecided;
if (hasSeenArray(observed) && hasSeenNonArray(observed))
arrayClass = Array::PossiblyArray;
@@ -150,9 +131,7 @@ ArrayMode ArrayMode::fromObserved(const ConcurrentJITLocker& locker, ArrayProfil
}
}
-ArrayMode ArrayMode::refine(
- Graph& graph, Node* node,
- SpeculatedType base, SpeculatedType index, SpeculatedType value) const
+ArrayMode ArrayMode::refine(SpeculatedType base, SpeculatedType index, SpeculatedType value, NodeFlags flags) const
{
if (!base || !index) {
// It can be that we had a legitimate arrayMode but no incoming predictions. That'll
@@ -165,10 +144,6 @@ ArrayMode ArrayMode::refine(
if (!isInt32Speculation(index))
return ArrayMode(Array::Generic);
- // If we had exited because of an exotic object behavior, then don't try to specialize.
- if (graph.hasExitSite(node->origin.semantic, ExoticObjectMode))
- return ArrayMode(Array::Generic);
-
// Note: our profiling currently doesn't give us good information in case we have
// an unlikely control flow path that sets the base to a non-cell value. Value
// profiling and prediction propagation will probably tell us that the value is
@@ -180,7 +155,10 @@ ArrayMode ArrayMode::refine(
// should just trust the array profile.
switch (type()) {
- case Array::SelectUsingArguments:
+ case Array::Unprofiled:
+ return ArrayMode(Array::ForceExit);
+
+ case Array::Undecided:
if (!value)
return withType(Array::ForceExit);
if (isInt32Speculation(value))
@@ -188,20 +166,7 @@ ArrayMode ArrayMode::refine(
if (isFullNumberSpeculation(value))
return withTypeAndConversion(Array::Double, Array::Convert);
return withTypeAndConversion(Array::Contiguous, Array::Convert);
- case Array::Undecided: {
- // If we have an OriginalArray and the JSArray prototype chain is sane,
- // any indexed access always return undefined. We have a fast path for that.
- JSGlobalObject* globalObject = graph.globalObjectFor(node->origin.semantic);
- if (node->op() == GetByVal
- && arrayClass() == Array::OriginalArray
- && globalObject->arrayPrototypeChainIsSane()
- && !graph.hasExitSite(node->origin.semantic, OutOfBounds)) {
- graph.watchpoints().addLazily(globalObject->arrayPrototype()->structure()->transitionWatchpointSet());
- graph.watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
- return withSpeculation(Array::SaneChain);
- }
- return ArrayMode(Array::Generic);
- }
+
case Array::Int32:
if (!value || isInt32Speculation(value))
return *this;
@@ -210,93 +175,54 @@ ArrayMode ArrayMode::refine(
return withTypeAndConversion(Array::Contiguous, Array::Convert);
case Array::Double:
+ if (flags & NodeBytecodeUsesAsInt)
+ return withTypeAndConversion(Array::Contiguous, Array::RageConvert);
if (!value || isFullNumberSpeculation(value))
return *this;
return withTypeAndConversion(Array::Contiguous, Array::Convert);
case Array::Contiguous:
+ if (doesConversion() && (flags & NodeBytecodeUsesAsInt))
+ return withConversion(Array::RageConvert);
return *this;
-
- case Array::Int8Array:
- case Array::Int16Array:
- case Array::Int32Array:
- case Array::Uint8Array:
- case Array::Uint8ClampedArray:
- case Array::Uint16Array:
- case Array::Uint32Array:
- case Array::Float32Array:
- case Array::Float64Array:
- switch (node->op()) {
- case PutByVal:
- if (graph.hasExitSite(node->origin.semantic, OutOfBounds) || !isInBounds())
- return withSpeculation(Array::OutOfBounds);
- return withSpeculation(Array::InBounds);
- default:
- return withSpeculation(Array::InBounds);
- }
- return *this;
- case Array::Unprofiled:
- case Array::SelectUsingPredictions: {
+
+ case Array::SelectUsingPredictions:
base &= ~SpecOther;
if (isStringSpeculation(base))
return withType(Array::String);
- if (isDirectArgumentsSpeculation(base) || isScopedArgumentsSpeculation(base)) {
- // Handle out-of-bounds accesses as generic accesses.
- if (graph.hasExitSite(node->origin.semantic, OutOfBounds) || !isInBounds())
- return ArrayMode(Array::Generic);
-
- if (isDirectArgumentsSpeculation(base))
- return withType(Array::DirectArguments);
- return withType(Array::ScopedArguments);
- }
-
- ArrayMode result;
- switch (node->op()) {
- case PutByVal:
- if (graph.hasExitSite(node->origin.semantic, OutOfBounds) || !isInBounds())
- result = withSpeculation(Array::OutOfBounds);
- else
- result = withSpeculation(Array::InBounds);
- break;
-
- default:
- result = withSpeculation(Array::InBounds);
- break;
- }
+ if (isArgumentsSpeculation(base))
+ return withType(Array::Arguments);
if (isInt8ArraySpeculation(base))
- return result.withType(Array::Int8Array);
+ return withType(Array::Int8Array);
if (isInt16ArraySpeculation(base))
- return result.withType(Array::Int16Array);
+ return withType(Array::Int16Array);
if (isInt32ArraySpeculation(base))
- return result.withType(Array::Int32Array);
+ return withType(Array::Int32Array);
if (isUint8ArraySpeculation(base))
- return result.withType(Array::Uint8Array);
+ return withType(Array::Uint8Array);
if (isUint8ClampedArraySpeculation(base))
- return result.withType(Array::Uint8ClampedArray);
+ return withType(Array::Uint8ClampedArray);
if (isUint16ArraySpeculation(base))
- return result.withType(Array::Uint16Array);
+ return withType(Array::Uint16Array);
if (isUint32ArraySpeculation(base))
- return result.withType(Array::Uint32Array);
+ return withType(Array::Uint32Array);
if (isFloat32ArraySpeculation(base))
- return result.withType(Array::Float32Array);
+ return withType(Array::Float32Array);
if (isFloat64ArraySpeculation(base))
- return result.withType(Array::Float64Array);
+ return withType(Array::Float64Array);
- if (type() == Array::Unprofiled)
- return ArrayMode(Array::ForceExit);
return ArrayMode(Array::Generic);
- }
default:
return *this;
@@ -316,8 +242,6 @@ Structure* ArrayMode::originalArrayStructure(Graph& graph, const CodeOrigin& cod
return globalObject->originalArrayStructureForIndexingType(ArrayWithDouble);
case Array::Contiguous:
return globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous);
- case Array::Undecided:
- return globalObject->originalArrayStructureForIndexingType(ArrayWithUndecided);
case Array::ArrayStorage:
return globalObject->originalArrayStructureForIndexingType(ArrayWithArrayStorage);
default:
@@ -341,57 +265,34 @@ Structure* ArrayMode::originalArrayStructure(Graph& graph, const CodeOrigin& cod
Structure* ArrayMode::originalArrayStructure(Graph& graph, Node* node) const
{
- return originalArrayStructure(graph, node->origin.semantic);
+ return originalArrayStructure(graph, node->codeOrigin);
}
-bool ArrayMode::alreadyChecked(Graph& graph, Node* node, const AbstractValue& value, IndexingType shape) const
+bool ArrayMode::alreadyChecked(Graph& graph, Node* node, AbstractValue& value, IndexingType shape) const
{
switch (arrayClass()) {
- case Array::OriginalArray: {
- if (value.m_structure.isTop())
- return false;
- for (unsigned i = value.m_structure.size(); i--;) {
- Structure* structure = value.m_structure[i];
- if ((structure->indexingType() & IndexingShapeMask) != shape)
- return false;
- if (!(structure->indexingType() & IsArray))
- return false;
- if (!graph.globalObjectFor(node->origin.semantic)->isOriginalArrayStructure(structure))
- return false;
- }
- return true;
- }
+ case Array::OriginalArray:
+ return value.m_currentKnownStructure.hasSingleton()
+ && (value.m_currentKnownStructure.singleton()->indexingType() & IndexingShapeMask) == shape
+ && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray)
+ && graph.globalObjectFor(node->codeOrigin)->isOriginalArrayStructure(value.m_currentKnownStructure.singleton());
- case Array::Array: {
+ case Array::Array:
if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(shape | IsArray)))
return true;
- if (value.m_structure.isTop())
- return false;
- for (unsigned i = value.m_structure.size(); i--;) {
- Structure* structure = value.m_structure[i];
- if ((structure->indexingType() & IndexingShapeMask) != shape)
- return false;
- if (!(structure->indexingType() & IsArray))
- return false;
- }
- return true;
- }
+ return value.m_currentKnownStructure.hasSingleton()
+ && (value.m_currentKnownStructure.singleton()->indexingType() & IndexingShapeMask) == shape
+ && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
- default: {
+ default:
if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(shape) | asArrayModes(shape | IsArray)))
return true;
- if (value.m_structure.isTop())
- return false;
- for (unsigned i = value.m_structure.size(); i--;) {
- Structure* structure = value.m_structure[i];
- if ((structure->indexingType() & IndexingShapeMask) != shape)
- return false;
- }
- return true;
- } }
+ return value.m_currentKnownStructure.hasSingleton()
+ && (value.m_currentKnownStructure.singleton()->indexingType() & IndexingShapeMask) == shape;
+ }
}
-bool ArrayMode::alreadyChecked(Graph& graph, Node* node, const AbstractValue& value) const
+bool ArrayMode::alreadyChecked(Graph& graph, Node* node, AbstractValue& value) const
{
switch (type()) {
case Array::Generic:
@@ -414,50 +315,29 @@ bool ArrayMode::alreadyChecked(Graph& graph, Node* node, const AbstractValue& va
case Array::ArrayStorage:
return alreadyChecked(graph, node, value, ArrayStorageShape);
-
- case Array::Undecided:
- return alreadyChecked(graph, node, value, UndecidedShape);
case Array::SlowPutArrayStorage:
switch (arrayClass()) {
- case Array::OriginalArray: {
+ case Array::OriginalArray:
CRASH();
return false;
- }
- case Array::Array: {
+ case Array::Array:
if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage)))
return true;
- if (value.m_structure.isTop())
- return false;
- for (unsigned i = value.m_structure.size(); i--;) {
- Structure* structure = value.m_structure[i];
- if (!hasAnyArrayStorage(structure->indexingType()))
- return false;
- if (!(structure->indexingType() & IsArray))
- return false;
- }
- return true;
- }
+ return value.m_currentKnownStructure.hasSingleton()
+ && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType())
+ && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
- default: {
+ default:
if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage)))
return true;
- if (value.m_structure.isTop())
- return false;
- for (unsigned i = value.m_structure.size(); i--;) {
- Structure* structure = value.m_structure[i];
- if (!hasAnyArrayStorage(structure->indexingType()))
- return false;
- }
- return true;
- } }
-
- case Array::DirectArguments:
- return speculationChecked(value.m_type, SpecDirectArguments);
+ return value.m_currentKnownStructure.hasSingleton()
+ && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType());
+ }
- case Array::ScopedArguments:
- return speculationChecked(value.m_type, SpecScopedArguments);
+ case Array::Arguments:
+ return speculationChecked(value.m_type, SpecArguments);
case Array::Int8Array:
return speculationChecked(value.m_type, SpecInt8Array);
@@ -488,7 +368,7 @@ bool ArrayMode::alreadyChecked(Graph& graph, Node* node, const AbstractValue& va
case Array::SelectUsingPredictions:
case Array::Unprofiled:
- case Array::SelectUsingArguments:
+ case Array::Undecided:
break;
}
@@ -501,8 +381,6 @@ const char* arrayTypeToString(Array::Type type)
switch (type) {
case Array::SelectUsingPredictions:
return "SelectUsingPredictions";
- case Array::SelectUsingArguments:
- return "SelectUsingArguments";
case Array::Unprofiled:
return "Unprofiled";
case Array::Generic:
@@ -523,10 +401,8 @@ const char* arrayTypeToString(Array::Type type)
return "ArrayStorage";
case Array::SlowPutArrayStorage:
return "SlowPutArrayStorage";
- case Array::DirectArguments:
- return "DirectArguments";
- case Array::ScopedArguments:
- return "ScopedArguments";
+ case Array::Arguments:
+ return "Arguments";
case Array::Int8Array:
return "Int8Array";
case Array::Int16Array:
@@ -595,6 +471,8 @@ const char* arrayConversionToString(Array::Conversion conversion)
return "AsIs";
case Array::Convert:
return "Convert";
+ case Array::RageConvert:
+ return "RageConvert";
default:
return "Unknown!";
}
diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.h b/Source/JavaScriptCore/dfg/DFGArrayMode.h
index 9c988c0b2..cbb87bd93 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayMode.h
+++ b/Source/JavaScriptCore/dfg/DFGArrayMode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGArrayMode_h
#define DFGArrayMode_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "ArrayProfile.h"
@@ -53,7 +55,6 @@ enum Action {
enum Type {
SelectUsingPredictions, // Implies that we need predictions to decide. We will never get to the backend in this mode.
- SelectUsingArguments, // Implies that we use the Node's arguments to decide. We will never get to the backend in this mode.
Unprofiled, // Implies that array profiling didn't see anything. But that could be because the operands didn't comply with basic type assumptions (base is cell, property is int). This either becomes Generic or ForceExit depending on value profiling.
ForceExit, // Implies that we have no idea how to execute this operation, so we should just give up.
Generic,
@@ -66,9 +67,7 @@ enum Type {
ArrayStorage,
SlowPutArrayStorage,
- DirectArguments,
- ScopedArguments,
-
+ Arguments,
Int8Array,
Int16Array,
Int32Array,
@@ -90,14 +89,14 @@ enum Class {
enum Speculation {
SaneChain, // In bounds and the array prototype chain is still intact, i.e. loading a hole doesn't require special treatment.
-
InBounds, // In bounds and not loading a hole.
ToHole, // Potentially storing to a hole.
OutOfBounds // Out-of-bounds access and anything can happen.
};
enum Conversion {
AsIs,
- Convert
+ Convert,
+ RageConvert
};
} // namespace Array
@@ -174,15 +173,11 @@ public:
return ArrayMode(type(), arrayClass(), speculation, conversion());
}
- ArrayMode withArrayClass(Array::Class arrayClass) const
- {
- return ArrayMode(type(), arrayClass, speculation(), conversion());
- }
-
- ArrayMode withSpeculationFromProfile(const ConcurrentJITLocker& locker, ArrayProfile* profile, bool makeSafe) const
+ ArrayMode withProfile(const ConcurrentJITLocker& locker, ArrayProfile* profile, bool makeSafe) const
{
Array::Speculation mySpeculation;
-
+ Array::Class myArrayClass;
+
if (makeSafe)
mySpeculation = Array::OutOfBounds;
else if (profile->mayStoreToHole(locker))
@@ -190,13 +185,6 @@ public:
else
mySpeculation = Array::InBounds;
- return withSpeculation(mySpeculation);
- }
-
- ArrayMode withProfile(const ConcurrentJITLocker& locker, ArrayProfile* profile, bool makeSafe) const
- {
- Array::Class myArrayClass;
-
if (isJSArray()) {
if (profile->usesOriginalArrayStructures(locker) && benefitsFromOriginalArray())
myArrayClass = Array::OriginalArray;
@@ -205,7 +193,7 @@ public:
} else
myArrayClass = arrayClass();
- return withArrayClass(myArrayClass).withSpeculationFromProfile(locker, profile, makeSafe);
+ return ArrayMode(type(), myArrayClass, mySpeculation, conversion());
}
ArrayMode withType(Array::Type type) const
@@ -223,9 +211,9 @@ public:
return ArrayMode(type, arrayClass(), speculation(), conversion);
}
- ArrayMode refine(Graph&, Node*, SpeculatedType base, SpeculatedType index, SpeculatedType value = SpecNone) const;
+ ArrayMode refine(SpeculatedType base, SpeculatedType index, SpeculatedType value = SpecNone, NodeFlags = 0) const;
- bool alreadyChecked(Graph&, Node*, const AbstractValue&) const;
+ bool alreadyChecked(Graph&, Node*, AbstractValue&) const;
void dump(PrintStream&) const;
@@ -294,13 +282,10 @@ public:
{
switch (type()) {
case Array::SelectUsingPredictions:
- case Array::SelectUsingArguments:
case Array::Unprofiled:
- case Array::Undecided:
case Array::ForceExit:
case Array::Generic:
- case Array::DirectArguments:
- case Array::ScopedArguments:
+ case Array::Arguments:
return false;
default:
return true;
@@ -310,6 +295,7 @@ public:
bool lengthNeedsStorage() const
{
switch (type()) {
+ case Array::Undecided:
case Array::Int32:
case Array::Double:
case Array::Contiguous:
@@ -325,9 +311,11 @@ public:
{
switch (type()) {
case Array::String:
- case Array::DirectArguments:
- case Array::ScopedArguments:
return ArrayMode(Array::Generic);
+#if USE(JSVALUE32_64)
+ case Array::Arguments:
+ return ArrayMode(Array::Generic);
+#endif
default:
return *this;
}
@@ -337,10 +325,10 @@ public:
{
switch (type()) {
case Array::SelectUsingPredictions:
- case Array::SelectUsingArguments:
case Array::Unprofiled:
case Array::ForceExit:
case Array::Generic:
+ case Array::Undecided:
return false;
default:
return true;
@@ -374,7 +362,6 @@ public:
case Array::Int32:
case Array::Double:
case Array::Contiguous:
- case Array::Undecided:
case Array::ArrayStorage:
return true;
default:
@@ -410,7 +397,7 @@ public:
case Array::ArrayStorage:
return arrayModesWithIndexingShape(ArrayStorageShape);
case Array::SlowPutArrayStorage:
- return arrayModesWithIndexingShapes(SlowPutArrayStorageShape, ArrayStorageShape);
+ return arrayModesWithIndexingShape(SlowPutArrayStorageShape);
default:
return asArrayModes(NonArray);
}
@@ -466,14 +453,7 @@ private:
}
}
- ArrayModes arrayModesWithIndexingShapes(IndexingType shape1, IndexingType shape2) const
- {
- ArrayModes arrayMode1 = arrayModesWithIndexingShape(shape1);
- ArrayModes arrayMode2 = arrayModesWithIndexingShape(shape2);
- return arrayMode1 | arrayMode2;
- }
-
- bool alreadyChecked(Graph&, Node*, const AbstractValue&, IndexingType shape) const;
+ bool alreadyChecked(Graph&, Node*, AbstractValue&, IndexingType shape) const;
union {
struct {
diff --git a/Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h
index 9019eb18c..9c7d47a42 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h
+++ b/Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h
@@ -26,6 +26,8 @@
#ifndef DFGArrayifySlowPathGenerator_h
#define DFGArrayifySlowPathGenerator_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGArrayMode.h"
@@ -101,7 +103,10 @@ protected:
jit->callOperation(operationEnsureDouble, m_tempGPR, m_baseGPR);
break;
case Array::Contiguous:
- jit->callOperation(operationEnsureContiguous, m_tempGPR, m_baseGPR);
+ if (m_arrayMode.conversion() == Array::RageConvert)
+ jit->callOperation(operationRageEnsureContiguous, m_tempGPR, m_baseGPR);
+ else
+ jit->callOperation(operationEnsureContiguous, m_tempGPR, m_baseGPR);
break;
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
@@ -117,14 +122,23 @@ protected:
if (m_op == ArrayifyToStructure) {
ASSERT(m_structure);
m_badIndexingTypeJump.fill(
- jit, jit->m_jit.branchWeakStructure(MacroAssembler::NotEqual, MacroAssembler::Address(m_baseGPR, JSCell::structureIDOffset()), m_structure));
+ jit, jit->m_jit.branchWeakPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(m_baseGPR, JSCell::structureOffset()),
+ m_structure));
} else {
+ // Alas, we need to reload the structure because silent spilling does not save
+ // temporaries. Nor would it be useful for it to do so. Either way we're talking
+ // about a load.
+ jit->m_jit.loadPtr(
+ MacroAssembler::Address(m_baseGPR, JSCell::structureOffset()), m_structureGPR);
+
// Finally, check that we have the kind of array storage that we wanted to get.
// Note that this is a backwards speculation check, which will result in the
// bytecode operation corresponding to this arrayification being reexecuted.
// That's fine, since arrayification is not user-visible.
jit->m_jit.load8(
- MacroAssembler::Address(m_baseGPR, JSCell::indexingTypeOffset()), m_structureGPR);
+ MacroAssembler::Address(m_structureGPR, Structure::indexingTypeOffset()), m_structureGPR);
m_badIndexingTypeJump.fill(
jit, jit->jumpSlowForUnwantedArrayMode(m_structureGPR, m_arrayMode));
}
diff --git a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp
index 9d8c710de..ca770681a 100644
--- a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp
@@ -28,13 +28,12 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
-AtTailAbstractState::AtTailAbstractState(Graph& graph)
- : m_graph(graph)
- , m_block(0)
+AtTailAbstractState::AtTailAbstractState()
+ : m_block(0)
{
}
@@ -48,7 +47,7 @@ void AtTailAbstractState::createValueForNode(Node* node)
AbstractValue& AtTailAbstractState::forNode(Node* node)
{
HashMap<Node*, AbstractValue>::iterator iter = m_block->ssa->valuesAtTail.find(node);
- DFG_ASSERT(m_graph, node, iter != m_block->ssa->valuesAtTail.end());
+ ASSERT(iter != m_block->ssa->valuesAtTail.end());
return iter->value;
}
diff --git a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h
index cd6a08001..a994bf8d6 100644
--- a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h
+++ b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h
@@ -26,6 +26,8 @@
#ifndef DFGAtTailAbstractState_h
#define DFGAtTailAbstractState_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAbstractValue.h"
@@ -36,7 +38,7 @@ namespace JSC { namespace DFG {
class AtTailAbstractState {
public:
- AtTailAbstractState(Graph&);
+ AtTailAbstractState();
~AtTailAbstractState();
@@ -54,16 +56,14 @@ public:
bool isValid() { return m_block->cfaDidFinish; }
- StructureClobberState structureClobberState() const { return m_block->cfaStructureClobberStateAtTail; }
-
void setDidClobber(bool) { }
- void setStructureClobberState(StructureClobberState state) { RELEASE_ASSERT(state == m_block->cfaStructureClobberStateAtTail); }
void setIsValid(bool isValid) { m_block->cfaDidFinish = isValid; }
void setBranchDirection(BranchDirection) { }
void setFoundConstants(bool) { }
+ bool haveStructures() const { return true; } // It's always safe to return true.
+ void setHaveStructures(bool) { }
private:
- Graph& m_graph;
BasicBlock* m_block;
};
diff --git a/Source/JavaScriptCore/dfg/DFGAvailability.cpp b/Source/JavaScriptCore/dfg/DFGAvailability.cpp
index 0d998abda..669c2b439 100644
--- a/Source/JavaScriptCore/dfg/DFGAvailability.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAvailability.cpp
@@ -29,7 +29,6 @@
#if ENABLE(DFG_JIT)
#include "DFGNode.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGAvailability.h b/Source/JavaScriptCore/dfg/DFGAvailability.h
index 507d816aa..fd9bf6529 100644
--- a/Source/JavaScriptCore/dfg/DFGAvailability.h
+++ b/Source/JavaScriptCore/dfg/DFGAvailability.h
@@ -81,26 +81,10 @@ public:
return withNode(unavailableMarker());
}
- void setFlush(FlushedAt flushedAt)
- {
- m_flushedAt = flushedAt;
- }
-
- void setNode(Node* node)
- {
- m_node = node;
- }
-
- void setNodeUnavailable()
- {
- m_node = unavailableMarker();
- }
-
bool nodeIsUndecided() const { return !m_node; }
bool nodeIsUnavailable() const { return m_node == unavailableMarker(); }
bool hasNode() const { return !nodeIsUndecided() && !nodeIsUnavailable(); }
- bool shouldUseNode() const { return !isFlushUseful() && hasNode(); }
Node* node() const
{
@@ -110,12 +94,6 @@ public:
}
FlushedAt flushedAt() const { return m_flushedAt; }
- bool isFlushUseful() const
- {
- return flushedAt().format() != DeadFlush && flushedAt().format() != ConflictingFlush;
- }
-
- bool isDead() const { return !isFlushUseful() && !hasNode(); }
bool operator!() const { return nodeIsUnavailable() && flushedAt().format() == ConflictingFlush; }
@@ -125,11 +103,6 @@ public:
&& m_flushedAt == other.m_flushedAt;
}
- bool operator!=(const Availability& other) const
- {
- return !(*this == other);
- }
-
Availability merge(const Availability& other) const
{
return Availability(
diff --git a/Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp b/Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp
deleted file mode 100644
index e319dc6f2..000000000
--- a/Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGAvailabilityMap.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "JSCInlines.h"
-#include "OperandsInlines.h"
-#include <wtf/ListDump.h>
-
-namespace JSC { namespace DFG {
-
-void AvailabilityMap::pruneHeap()
-{
- if (m_heap.isEmpty())
- return;
-
- HashSet<Node*> possibleNodes;
-
- for (unsigned i = m_locals.size(); i--;) {
- if (m_locals[i].hasNode())
- possibleNodes.add(m_locals[i].node());
- }
-
- closeOverNodes(
- [&] (Node* node) -> bool {
- return possibleNodes.contains(node);
- },
- [&] (Node* node) -> bool {
- return possibleNodes.add(node).isNewEntry;
- });
-
- HashMap<PromotedHeapLocation, Availability> newHeap;
- for (auto pair : m_heap) {
- if (possibleNodes.contains(pair.key.base()))
- newHeap.add(pair.key, pair.value);
- }
- m_heap = newHeap;
-}
-
-void AvailabilityMap::pruneByLiveness(Graph& graph, CodeOrigin where)
-{
- Operands<Availability> localsCopy(OperandsLike, m_locals);
- graph.forAllLiveInBytecode(
- where,
- [&] (VirtualRegister reg) {
- localsCopy.operand(reg) = m_locals.operand(reg);
- });
- m_locals = localsCopy;
- pruneHeap();
-}
-
-void AvailabilityMap::clear()
-{
- m_locals.fill(Availability());
- m_heap.clear();
-}
-
-void AvailabilityMap::dump(PrintStream& out) const
-{
- out.print("{locals = ", m_locals, "; heap = ", mapDump(m_heap), "}");
-}
-
-bool AvailabilityMap::operator==(const AvailabilityMap& other) const
-{
- return m_locals == other.m_locals
- && m_heap == other.m_heap;
-}
-
-void AvailabilityMap::merge(const AvailabilityMap& other)
-{
- for (unsigned i = other.m_locals.size(); i--;)
- m_locals[i] = other.m_locals[i].merge(m_locals[i]);
-
- for (auto pair : other.m_heap) {
- auto result = m_heap.add(pair.key, Availability());
- result.iterator->value = pair.value.merge(result.iterator->value);
- }
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGAvailabilityMap.h b/Source/JavaScriptCore/dfg/DFGAvailabilityMap.h
deleted file mode 100644
index 1cdd25b3d..000000000
--- a/Source/JavaScriptCore/dfg/DFGAvailabilityMap.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGAvailabilityMap_h
-#define DFGAvailabilityMap_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGAvailability.h"
-#include "DFGPromotedHeapLocation.h"
-
-namespace JSC { namespace DFG {
-
-struct AvailabilityMap {
- void pruneHeap();
- void pruneByLiveness(Graph&, CodeOrigin);
- void clear();
-
- void dump(PrintStream& out) const;
-
- bool operator==(const AvailabilityMap& other) const;
-
- void merge(const AvailabilityMap& other);
-
- template<typename Functor>
- void forEachAvailability(const Functor& functor)
- {
- for (unsigned i = m_locals.size(); i--;)
- functor(m_locals[i]);
- for (auto pair : m_heap)
- functor(pair.value);
- }
-
- template<typename HasFunctor, typename AddFunctor>
- void closeOverNodes(const HasFunctor& has, const AddFunctor& add)
- {
- bool changed;
- do {
- changed = false;
- for (auto pair : m_heap) {
- if (pair.value.hasNode() && has(pair.key.base()))
- changed |= add(pair.value.node());
- }
- } while (changed);
- }
-
- template<typename HasFunctor, typename AddFunctor>
- void closeStartingWithLocal(VirtualRegister reg, const HasFunctor& has, const AddFunctor& add)
- {
- Availability availability = m_locals.operand(reg);
- if (!availability.hasNode())
- return;
-
- if (!add(availability.node()))
- return;
-
- closeOverNodes(has, add);
- }
-
- Operands<Availability> m_locals;
- HashMap<PromotedHeapLocation, Availability> m_heap;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGAvailabilityMap_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp
index 2768f611f..a7ef96d70 100644
--- a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
#include "DFGBasicBlockInlines.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -44,21 +44,17 @@ public:
bool run()
{
- m_changed = true;
- while (m_changed) {
- m_changed = false;
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
-
- // Prevent a tower of overflowing additions from creating a value that is out of the
- // safe 2^48 range.
- m_allowNestedOverflowingAdditions = block->size() < (1 << 16);
-
- for (unsigned indexInBlock = block->size(); indexInBlock--;)
- propagate(block->at(indexInBlock));
- }
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+
+ // Prevent a tower of overflowing additions from creating a value that is out of the
+ // safe 2^48 range.
+ m_allowNestedOverflowingAdditions = block->size() < (1 << 16);
+
+ for (unsigned indexInBlock = block->size(); indexInBlock--;)
+ propagate(block->at(indexInBlock));
}
return true;
@@ -67,17 +63,17 @@ public:
private:
bool isNotNegZero(Node* node)
{
- if (!node->isNumberConstant())
+ if (!m_graph.isNumberConstant(node))
return false;
- double value = node->asNumber();
+ double value = m_graph.valueOfNumberConstant(node);
return (value || 1.0 / value > 0.0);
}
bool isNotPosZero(Node* node)
{
- if (!node->isNumberConstant())
+ if (!m_graph.isNumberConstant(node))
return false;
- double value = node->asNumber();
+ double value = m_graph.valueOfNumberConstant(node);
return (value || 1.0 / value < 0.0);
}
@@ -85,7 +81,7 @@ private:
template<int power>
bool isWithinPowerOfTwoForConstant(Node* node)
{
- JSValue immediateValue = node->asJSValue();
+ JSValue immediateValue = node->valueOfJSConstant(codeBlock());
if (!immediateValue.isNumber())
return false;
double immediate = immediateValue.asNumber();
@@ -95,7 +91,7 @@ private:
template<int power>
bool isWithinPowerOfTwoNonRecursive(Node* node)
{
- if (!node->isNumberConstant())
+ if (node->op() != JSConstant)
return false;
return isWithinPowerOfTwoForConstant<power>(node);
}
@@ -104,9 +100,7 @@ private:
bool isWithinPowerOfTwo(Node* node)
{
switch (node->op()) {
- case DoubleConstant:
- case JSConstant:
- case Int52Constant: {
+ case JSConstant: {
return isWithinPowerOfTwoForConstant<power>(node);
}
@@ -130,9 +124,9 @@ private:
return true;
Node* shiftAmount = node->child2().node();
- if (!node->isNumberConstant())
+ if (shiftAmount->op() != JSConstant)
return false;
- JSValue immediateValue = shiftAmount->asJSValue();
+ JSValue immediateValue = shiftAmount->valueOfJSConstant(codeBlock());
if (!immediateValue.isInt32())
return false;
return immediateValue.asInt32() > 32 - power;
@@ -180,8 +174,7 @@ private:
switch (node->op()) {
case GetLocal: {
VariableAccessData* variableAccessData = node->variableAccessData();
- flags &= ~NodeBytecodeUsesAsInt; // We don't care about cross-block uses-as-int.
- m_changed |= variableAccessData->mergeFlags(flags);
+ variableAccessData->mergeFlags(flags);
break;
}
@@ -189,16 +182,7 @@ private:
VariableAccessData* variableAccessData = node->variableAccessData();
if (!variableAccessData->isLoadedFrom())
break;
- flags = variableAccessData->flags();
- RELEASE_ASSERT(!(flags & ~NodeBytecodeBackPropMask));
- flags |= NodeBytecodeUsesAsNumber; // Account for the fact that control flow may cause overflows that our modeling can't handle.
- node->child1()->mergeFlags(flags);
- break;
- }
-
- case Flush: {
- VariableAccessData* variableAccessData = node->variableAccessData();
- m_changed |= variableAccessData->mergeFlags(NodeBytecodeUsesAsValue);
+ node->child1()->mergeFlags(NodeBytecodeUsesAsValue);
break;
}
@@ -215,7 +199,6 @@ private:
case ArithIMul: {
flags |= NodeBytecodeUsesAsInt;
flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther);
- flags &= ~NodeBytecodeUsesAsArrayIndex;
node->child1()->mergeFlags(flags);
node->child2()->mergeFlags(flags);
break;
@@ -223,10 +206,11 @@ private:
case StringCharCodeAt: {
node->child1()->mergeFlags(NodeBytecodeUsesAsValue);
- node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
+ node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt);
break;
}
+ case Identity:
case UInt32ToNumber: {
node->child1()->mergeFlags(flags);
break;
@@ -259,14 +243,7 @@ private:
node->child2()->mergeFlags(flags);
break;
}
-
- case ArithClz32: {
- flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther | ~NodeBytecodeUsesAsArrayIndex);
- flags |= NodeBytecodeUsesAsInt;
- node->child1()->mergeFlags(flags);
- break;
- }
-
+
case ArithSub: {
if (isNotNegZero(node->child1().node()) || isNotPosZero(node->child2().node()))
flags &= ~NodeBytecodeNeedsNegZero;
@@ -319,22 +296,27 @@ private:
}
case ArithMod: {
- flags |= NodeBytecodeUsesAsNumber;
+ flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero;
flags &= ~NodeBytecodeUsesAsOther;
node->child1()->mergeFlags(flags);
- node->child2()->mergeFlags(flags & ~NodeBytecodeNeedsNegZero);
+ node->child2()->mergeFlags(flags);
break;
}
case GetByVal: {
node->child1()->mergeFlags(NodeBytecodeUsesAsValue);
- node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
+ node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt);
+ break;
+ }
+
+ case GetMyArgumentByValSafe: {
+ node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt);
break;
}
case NewArrayWithSize: {
- node->child1()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
+ node->child1()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt);
break;
}
@@ -342,18 +324,17 @@ private:
// Negative zero is not observable. NaN versus undefined are only observable
// in that you would get a different exception message. So, like, whatever: we
// claim here that NaN v. undefined is observable.
- node->child1()->mergeFlags(NodeBytecodeUsesAsInt | NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsArrayIndex);
+ node->child1()->mergeFlags(NodeBytecodeUsesAsInt | NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
break;
}
case StringCharAt: {
node->child1()->mergeFlags(NodeBytecodeUsesAsValue);
- node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
+ node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt);
break;
}
- case ToString:
- case CallStringConstructor: {
+ case ToString: {
node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
break;
}
@@ -366,7 +347,7 @@ private:
case PutByValDirect:
case PutByVal: {
m_graph.varArgChild(node, 0)->mergeFlags(NodeBytecodeUsesAsValue);
- m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
+ m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt);
m_graph.varArgChild(node, 2)->mergeFlags(NodeBytecodeUsesAsValue);
break;
}
@@ -394,19 +375,9 @@ private:
// then -0 and 0 are treated the same.
node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
break;
- case SwitchCell:
- // There is currently no point to being clever here since this is used for switching
- // on objects.
- mergeDefaultFlags(node);
- break;
}
break;
}
-
- case Identity:
- // This would be trivial to handle but we just assert that we cannot see these yet.
- RELEASE_ASSERT_NOT_REACHED();
- break;
// Note: ArithSqrt, ArithSin, and ArithCos and other math intrinsics don't have special
// rules in here because they are always followed by Phantoms to signify that if the
@@ -421,7 +392,6 @@ private:
}
bool m_allowNestedOverflowingAdditions;
- bool m_changed;
};
bool performBackwardsPropagation(Graph& graph)
diff --git a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h
index 47e71919b..438684657 100644
--- a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGBackwardsPropagationPhase_h
#define DFGBackwardsPropagationPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlock.cpp b/Source/JavaScriptCore/dfg/DFGBasicBlock.cpp
index f83f50c46..07a972633 100644
--- a/Source/JavaScriptCore/dfg/DFGBasicBlock.cpp
+++ b/Source/JavaScriptCore/dfg/DFGBasicBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,12 +28,11 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
-BasicBlock::BasicBlock(
- unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals, float executionCount)
+BasicBlock::BasicBlock(unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals)
: bytecodeBegin(bytecodeBegin)
, index(NoBlock)
, isOSRTarget(false)
@@ -41,8 +40,6 @@ BasicBlock::BasicBlock(
, cfaShouldRevisit(false)
, cfaFoundConstants(false)
, cfaDidFinish(true)
- , cfaStructureClobberStateAtHead(StructuresAreWatched)
- , cfaStructureClobberStateAtTail(StructuresAreWatched)
, cfaBranchDirection(InvalidBranchDirection)
#if !ASSERT_DISABLED
, isLinked(false)
@@ -52,15 +49,10 @@ BasicBlock::BasicBlock(
, variablesAtTail(numArguments, numLocals)
, valuesAtHead(numArguments, numLocals)
, valuesAtTail(numArguments, numLocals)
- , intersectionOfPastValuesAtHead(numArguments, numLocals, AbstractValue::fullTop())
- , intersectionOfCFAHasVisited(true)
- , executionCount(executionCount)
{
}
-BasicBlock::~BasicBlock()
-{
-}
+BasicBlock::~BasicBlock() { }
void BasicBlock::ensureLocals(unsigned newNumLocals)
{
@@ -68,20 +60,6 @@ void BasicBlock::ensureLocals(unsigned newNumLocals)
variablesAtTail.ensureLocals(newNumLocals);
valuesAtHead.ensureLocals(newNumLocals);
valuesAtTail.ensureLocals(newNumLocals);
- intersectionOfPastValuesAtHead.ensureLocals(newNumLocals, AbstractValue::fullTop());
-}
-
-void BasicBlock::replaceTerminal(Node* node)
-{
- NodeAndIndex result = findTerminal();
- if (!result)
- append(node);
- else {
- m_nodes.insert(result.index + 1, node);
- result.node->remove();
- }
-
- ASSERT(terminal());
}
bool BasicBlock::isInPhis(Node* node) const
@@ -102,21 +80,6 @@ bool BasicBlock::isInBlock(Node* myNode) const
return false;
}
-Node* BasicBlock::firstOriginNode()
-{
- for (Node* node : *this) {
- if (node->origin.isSet())
- return node;
- }
- RELEASE_ASSERT_NOT_REACHED();
- return nullptr;
-}
-
-NodeOrigin BasicBlock::firstOrigin()
-{
- return firstOriginNode()->origin;
-}
-
void BasicBlock::removePredecessor(BasicBlock* block)
{
for (unsigned i = 0; i < predecessors.size(); ++i) {
@@ -146,9 +109,11 @@ void BasicBlock::dump(PrintStream& out) const
}
BasicBlock::SSAData::SSAData(BasicBlock* block)
+ : flushAtHead(OperandsLike, block->variablesAtHead)
+ , flushAtTail(OperandsLike, block->variablesAtHead)
+ , availabilityAtHead(OperandsLike, block->variablesAtHead)
+ , availabilityAtTail(OperandsLike, block->variablesAtHead)
{
- availabilityAtHead.m_locals = Operands<Availability>(OperandsLike, block->variablesAtHead);
- availabilityAtTail.m_locals = Operands<Availability>(OperandsLike, block->variablesAtHead);
}
BasicBlock::SSAData::~SSAData() { }
diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlock.h b/Source/JavaScriptCore/dfg/DFGBasicBlock.h
index 48f74d6ee..a3a801227 100644
--- a/Source/JavaScriptCore/dfg/DFGBasicBlock.h
+++ b/Source/JavaScriptCore/dfg/DFGBasicBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,15 +30,14 @@
#include "DFGAbstractValue.h"
#include "DFGAvailability.h"
-#include "DFGAvailabilityMap.h"
#include "DFGBranchDirection.h"
#include "DFGFlushedAt.h"
#include "DFGNode.h"
-#include "DFGNodeOrigin.h"
-#include "DFGStructureClobberState.h"
+#include "DFGVariadicFunction.h"
#include "Operands.h"
#include <wtf/HashMap.h>
#include <wtf/HashSet.h>
+#include <wtf/OwnPtr.h>
#include <wtf/Vector.h>
namespace JSC { namespace DFG {
@@ -47,12 +46,9 @@ class Graph;
class InsertionSet;
typedef Vector<BasicBlock*, 2> PredecessorList;
-typedef Vector<Node*, 8> BlockNodeList;
struct BasicBlock : RefCounted<BasicBlock> {
- BasicBlock(
- unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals,
- float executionCount);
+ BasicBlock(unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals);
~BasicBlock();
void ensureLocals(unsigned newNumLocals);
@@ -61,72 +57,19 @@ struct BasicBlock : RefCounted<BasicBlock> {
bool isEmpty() const { return !size(); }
Node*& at(size_t i) { return m_nodes[i]; }
Node* at(size_t i) const { return m_nodes[i]; }
- Node* tryAt(size_t i) const
- {
- if (i >= size())
- return nullptr;
- return at(i);
- }
Node*& operator[](size_t i) { return at(i); }
Node* operator[](size_t i) const { return at(i); }
-
- // Use this to find both the index of the terminal and the terminal itself in one go. May
- // return a clear NodeAndIndex if the basic block currently lacks a terminal. That may happen
- // in the middle of IR transformations within a phase but should never be the case in between
- // phases.
- //
- // The reason why this is more than just "at(size() - 1)" is that we may place non-terminal
- // liveness marking instructions after the terminal. This is supposed to happen infrequently
- // but some basic blocks - most notably return blocks - will have liveness markers for all of
- // the flushed variables right after the return.
- //
- // It turns out that doing this linear search is basically perf-neutral, so long as we force
- // the method to be inlined. Hence the ALWAYS_INLINE.
- ALWAYS_INLINE NodeAndIndex findTerminal() const
- {
- size_t i = size();
- while (i--) {
- Node* node = at(i);
- switch (node->op()) {
- case Jump:
- case Branch:
- case Switch:
- case Return:
- case Unreachable:
- return NodeAndIndex(node, i);
- // The bitter end can contain Phantoms and the like. There will probably only be one or two nodes after the terminal. They are all no-ops and will not have any checked children.
- case Check: // This is here because it's our universal no-op.
- case Phantom:
- case PhantomLocal:
- case Flush:
- break;
- default:
- return NodeAndIndex();
- }
- }
- return NodeAndIndex();
- }
-
- ALWAYS_INLINE Node* terminal() const
- {
- return findTerminal().node;
- }
-
+ Node* last() const { return at(size() - 1); }
void resize(size_t size) { m_nodes.resize(size); }
void grow(size_t size) { m_nodes.grow(size); }
void append(Node* node) { m_nodes.append(node); }
- void insertBeforeTerminal(Node* node)
+ void insertBeforeLast(Node* node)
{
- NodeAndIndex result = findTerminal();
- if (!result)
- append(node);
- else
- m_nodes.insert(result.index, node);
+ append(last());
+ at(size() - 2) = node;
}
- void replaceTerminal(Node*);
-
size_t numNodes() const { return phis.size() + size(); }
Node* node(size_t i) const
{
@@ -139,49 +82,32 @@ struct BasicBlock : RefCounted<BasicBlock> {
bool isInPhis(Node* node) const;
bool isInBlock(Node* myNode) const;
- BlockNodeList::iterator begin() { return m_nodes.begin(); }
- BlockNodeList::iterator end() { return m_nodes.end(); }
-
- Node* firstOriginNode();
- NodeOrigin firstOrigin();
-
- unsigned numSuccessors() { return terminal()->numSuccessors(); }
+ unsigned numSuccessors() { return last()->numSuccessors(); }
BasicBlock*& successor(unsigned index)
{
- return terminal()->successor(index);
+ return last()->successor(index);
}
BasicBlock*& successorForCondition(bool condition)
{
- return terminal()->successorForCondition(condition);
- }
-
- Node::SuccessorsIterable successors()
- {
- return terminal()->successors();
+ return last()->successorForCondition(condition);
}
void removePredecessor(BasicBlock* block);
void replacePredecessor(BasicBlock* from, BasicBlock* to);
- template<typename... Params>
- Node* appendNode(Graph&, SpeculatedType, Params...);
+#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
+ templatePre typeParams templatePost Node* appendNode(Graph&, SpeculatedType valueParamsComma valueParams);
+ DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE)
+#undef DFG_DEFINE_APPEND_NODE
- template<typename... Params>
- Node* appendNonTerminal(Graph&, SpeculatedType, Params...);
-
- template<typename... Params>
- Node* replaceTerminal(Graph&, SpeculatedType, Params...);
+#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
+ templatePre typeParams templatePost Node* appendNonTerminal(Graph&, SpeculatedType valueParamsComma valueParams);
+ DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE)
+#undef DFG_DEFINE_APPEND_NODE
void dump(PrintStream& out) const;
- void didLink()
- {
-#if !ASSERT_DISABLED
- isLinked = true;
-#endif
- }
-
// This value is used internally for block linking and OSR entry. It is mostly meaningless
// for other purposes due to inlining.
unsigned bytecodeBegin;
@@ -193,8 +119,6 @@ struct BasicBlock : RefCounted<BasicBlock> {
bool cfaShouldRevisit;
bool cfaFoundConstants;
bool cfaDidFinish;
- StructureClobberState cfaStructureClobberStateAtHead;
- StructureClobberState cfaStructureClobberStateAtTail;
BranchDirection cfaBranchDirection;
#if !ASSERT_DISABLED
bool isLinked;
@@ -210,56 +134,30 @@ struct BasicBlock : RefCounted<BasicBlock> {
Operands<AbstractValue> valuesAtHead;
Operands<AbstractValue> valuesAtTail;
- // The intersection of assumptions we have made previously at the head of this block. Note
- // that under normal circumstances, each time we run the CFA, we will get strictly more precise
- // results. But we don't actually require this to be the case. It's fine for the CFA to loosen
- // up for any odd reason. It's fine when this happens, because anything that the CFA proves
- // must be true from that point forward, except if some registered watchpoint fires, in which
- // case the code won't ever run. So, the CFA proving something less precise later on is just an
- // outcome of the CFA being imperfect; the more precise thing that it had proved earlier is no
- // less true.
- //
- // But for the purpose of OSR entry, we need to make sure that we remember what assumptions we
- // had used for optimizing any given basic block. That's what this is for.
- //
- // It's interesting that we could use this to make the CFA more precise: all future CFAs could
- // filter their results with this thing to sort of maintain maximal precision. Because we
- // expect CFA to usually be monotonically more precise each time we run it to fixpoint, this
- // would not be a productive optimization: it would make setting up a basic block more
- // expensive and would only benefit bizarre pathological cases.
- Operands<AbstractValue> intersectionOfPastValuesAtHead;
- bool intersectionOfCFAHasVisited;
-
- float executionCount;
-
// These fields are reserved for NaturalLoops.
static const unsigned numberOfInnerMostLoopIndices = 2;
unsigned innerMostLoopIndices[numberOfInnerMostLoopIndices];
struct SSAData {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- AvailabilityMap availabilityAtHead;
- AvailabilityMap availabilityAtTail;
-
- bool liveAtTailIsDirty { false };
- HashSet<Node*> liveAtTail;
+ Operands<FlushedAt> flushAtHead;
+ Operands<FlushedAt> flushAtTail;
+ Operands<Availability> availabilityAtHead;
+ Operands<Availability> availabilityAtTail;
HashSet<Node*> liveAtHead;
+ HashSet<Node*> liveAtTail;
HashMap<Node*, AbstractValue> valuesAtHead;
HashMap<Node*, AbstractValue> valuesAtTail;
SSAData(BasicBlock*);
~SSAData();
};
- std::unique_ptr<SSAData> ssa;
-
+ OwnPtr<SSAData> ssa;
+
private:
friend class InsertionSet;
- BlockNodeList m_nodes;
+ Vector<Node*, 8> m_nodes;
};
-typedef Vector<BasicBlock*, 5> BlockList;
-
struct UnlinkedBlock {
BasicBlock* m_block;
bool m_needsNormalLinking;
diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h b/Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h
index 3423a0db3..7f9e38af4 100644
--- a/Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h
+++ b/Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,29 +33,25 @@
namespace JSC { namespace DFG {
-template<typename... Params>
-Node* BasicBlock::appendNode(Graph& graph, SpeculatedType type, Params... params)
-{
- Node* result = graph.addNode(type, params...);
- append(result);
- return result;
-}
-
-template<typename... Params>
-Node* BasicBlock::appendNonTerminal(Graph& graph, SpeculatedType type, Params... params)
-{
- Node* result = graph.addNode(type, params...);
- insertBeforeTerminal(result);
- return result;
-}
-
-template<typename... Params>
-Node* BasicBlock::replaceTerminal(Graph& graph, SpeculatedType type, Params... params)
-{
- Node* result = graph.addNode(type, params...);
- replaceTerminal(result);
- return result;
-}
+#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
+ templatePre typeParams templatePost inline Node* BasicBlock::appendNode(Graph& graph, SpeculatedType type valueParamsComma valueParams) \
+ { \
+ Node* result = graph.addNode(type valueParamsComma valueArgs); \
+ append(result); \
+ return result; \
+ }
+ DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE)
+#undef DFG_DEFINE_APPEND_NODE
+
+#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
+ templatePre typeParams templatePost inline Node* BasicBlock::appendNonTerminal(Graph& graph, SpeculatedType type valueParamsComma valueParams) \
+ { \
+ Node* result = graph.addNode(type valueParamsComma valueArgs); \
+ insertBeforeLast(result); \
+ return result; \
+ }
+ DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE)
+#undef DFG_DEFINE_APPEND_NODE
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp b/Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp
new file mode 100644
index 000000000..7c35cc155
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGBinarySwitch.h"
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type)
+ : m_value(value)
+ , m_index(0)
+ , m_caseIndex(UINT_MAX)
+ , m_medianBias(0)
+ , m_type(type)
+{
+ if (cases.isEmpty())
+ return;
+
+ for (unsigned i = 0; i < cases.size(); ++i)
+ m_cases.append(Case(cases[i], i));
+ std::sort(m_cases.begin(), m_cases.end());
+ build(0, m_cases.size());
+}
+
+bool BinarySwitch::advance(MacroAssembler& jit)
+{
+ if (m_cases.isEmpty()) {
+ m_fallThrough.append(jit.jump());
+ return false;
+ }
+
+ if (m_index == m_branches.size()) {
+ RELEASE_ASSERT(m_jumpStack.isEmpty());
+ return false;
+ }
+
+ for (;;) {
+ const BranchCode& code = m_branches[m_index++];
+ switch (code.kind) {
+ case NotEqualToFallThrough:
+ switch (m_type) {
+ case Int32:
+ m_fallThrough.append(jit.branch32(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_fallThrough.append(jit.branchPtr(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case NotEqualToPush:
+ switch (m_type) {
+ case Int32:
+ m_jumpStack.append(jit.branch32(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_jumpStack.append(jit.branchPtr(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case LessThanToPush:
+ switch (m_type) {
+ case Int32:
+ m_jumpStack.append(jit.branch32(
+ MacroAssembler::LessThan, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_jumpStack.append(jit.branchPtr(
+ MacroAssembler::LessThan, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case Pop:
+ m_jumpStack.takeLast().link(&jit);
+ break;
+ case ExecuteCase:
+ m_caseIndex = code.index;
+ return true;
+ }
+ }
+}
+
+void BinarySwitch::build(unsigned start, unsigned end)
+{
+ unsigned size = end - start;
+
+ switch (size) {
+ case 0: {
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ case 1: {
+ if (start
+ && m_cases[start - 1].value == m_cases[start].value - 1
+ && start + 1 < m_cases.size()
+ && m_cases[start + 1].value == m_cases[start].value + 1) {
+ m_branches.append(BranchCode(ExecuteCase, start));
+ break;
+ }
+
+ m_branches.append(BranchCode(NotEqualToFallThrough, start));
+ m_branches.append(BranchCode(ExecuteCase, start));
+ break;
+ }
+
+ case 2: {
+ if (m_cases[start].value + 1 == m_cases[start + 1].value
+ && start
+ && m_cases[start - 1].value == m_cases[start].value - 1
+ && start + 2 < m_cases.size()
+ && m_cases[start + 2].value == m_cases[start + 1].value + 1) {
+ m_branches.append(BranchCode(NotEqualToPush, start));
+ m_branches.append(BranchCode(ExecuteCase, start));
+ m_branches.append(BranchCode(Pop));
+ m_branches.append(BranchCode(ExecuteCase, start + 1));
+ break;
+ }
+
+ unsigned firstCase = start;
+ unsigned secondCase = start + 1;
+ if (m_medianBias)
+ std::swap(firstCase, secondCase);
+ m_medianBias ^= 1;
+
+ m_branches.append(BranchCode(NotEqualToPush, firstCase));
+ m_branches.append(BranchCode(ExecuteCase, firstCase));
+ m_branches.append(BranchCode(Pop));
+ m_branches.append(BranchCode(NotEqualToFallThrough, secondCase));
+ m_branches.append(BranchCode(ExecuteCase, secondCase));
+ break;
+ }
+
+ default: {
+ unsigned medianIndex = (start + end) / 2;
+ if (!(size & 1)) {
+ // Because end is exclusive, in the even case, this rounds up by
+ // default. Hence median bias sometimes flips to subtracing one
+ // in order to get round-down behavior.
+ medianIndex -= m_medianBias;
+ m_medianBias ^= 1;
+ }
+
+ RELEASE_ASSERT(medianIndex > start);
+ RELEASE_ASSERT(medianIndex + 1 < end);
+
+ m_branches.append(BranchCode(LessThanToPush, medianIndex));
+ m_branches.append(BranchCode(NotEqualToPush, medianIndex));
+ m_branches.append(BranchCode(ExecuteCase, medianIndex));
+
+ m_branches.append(BranchCode(Pop));
+ build(medianIndex + 1, end);
+
+ m_branches.append(BranchCode(Pop));
+ build(start, medianIndex);
+ break;
+ } }
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGBinarySwitch.h b/Source/JavaScriptCore/dfg/DFGBinarySwitch.h
new file mode 100644
index 000000000..be39cb19a
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGBinarySwitch.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGBinarySwitch_h
+#define DFGBinarySwitch_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "GPRInfo.h"
+#include "MacroAssembler.h"
+
+namespace JSC { namespace DFG {
+
+// The BinarySwitch class makes it easy to emit a switch statement over either
+// 32-bit integers or pointers, where the switch uses a tree of branches
+// rather than a jump table. This makes it particularly useful if the case
+// values are too far apart to make a jump table practical, or if there are
+// sufficiently few cases that the total cost of log(numCases) branches is
+// less than the cost of an indirected jump.
+//
+// In an effort to simplify the logic of emitting code for each case, this
+// uses an iterator style, rather than a functor callback style. This makes
+// sense because even the iterator implementation found herein is relatively
+// simple, whereas the code it's used from is usually quite complex - one
+// example being the trie-of-trees string switch implementation, where the
+// code emitted for each case involves recursing to emit code for a sub-trie.
+//
+// Use this like so:
+//
+// BinarySwitch switch(valueReg, casesVector, BinarySwitch::Int32);
+// while (switch.advance(jit)) {
+// int value = switch.caseValue();
+// unsigned index = switch.caseIndex(); // index into casesVector, above
+// ... // generate code for this case
+// }
+// switch.fallThrough().link(&jit);
+
+class BinarySwitch {
+public:
+ enum Type {
+ Int32,
+ IntPtr
+ };
+
+ BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type);
+
+ unsigned caseIndex() const { return m_cases[m_caseIndex].index; }
+ int64_t caseValue() const { return m_cases[m_caseIndex].value; }
+
+ bool advance(MacroAssembler&);
+
+ MacroAssembler::JumpList& fallThrough() { return m_fallThrough; }
+
+private:
+ void build(unsigned start, unsigned end);
+
+ GPRReg m_value;
+
+ struct Case {
+ Case() { }
+
+ Case(int64_t value, unsigned index)
+ : value(value)
+ , index(index)
+ {
+ }
+
+ bool operator<(const Case& other) const
+ {
+ return value < other.value;
+ }
+
+ int64_t value;
+ unsigned index;
+ };
+
+ Vector<Case> m_cases;
+
+ enum BranchKind {
+ NotEqualToFallThrough,
+ NotEqualToPush,
+ LessThanToPush,
+ Pop,
+ ExecuteCase
+ };
+
+ struct BranchCode {
+ BranchCode() { }
+
+ BranchCode(BranchKind kind, unsigned index = UINT_MAX)
+ : kind(kind)
+ , index(index)
+ {
+ }
+
+ BranchKind kind;
+ unsigned index;
+ };
+
+ Vector<BranchCode> m_branches;
+
+ unsigned m_index;
+ unsigned m_caseIndex;
+ Vector<MacroAssembler::Jump> m_jumpStack;
+
+ MacroAssembler::JumpList m_fallThrough;
+
+ unsigned m_medianBias;
+
+ Type m_type;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGBinarySwitch_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp b/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp
index 7894192dc..252d638ee 100644
--- a/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp
+++ b/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace JSC { namespace DFG {
BlockInsertionSet::BlockInsertionSet(Graph& graph)
@@ -49,21 +47,20 @@ void BlockInsertionSet::insert(size_t index, PassRefPtr<BasicBlock> block)
insert(BlockInsertion(index, block));
}
-BasicBlock* BlockInsertionSet::insert(size_t index, float executionCount)
+BasicBlock* BlockInsertionSet::insert(size_t index)
{
RefPtr<BasicBlock> block = adoptRef(new BasicBlock(
UINT_MAX,
m_graph.block(0)->variablesAtHead.numberOfArguments(),
- m_graph.block(0)->variablesAtHead.numberOfLocals(),
- executionCount));
+ m_graph.block(0)->variablesAtHead.numberOfLocals()));
block->isReachable = true;
insert(index, block);
return block.get();
}
-BasicBlock* BlockInsertionSet::insertBefore(BasicBlock* before, float executionCount)
+BasicBlock* BlockInsertionSet::insertBefore(BasicBlock* before)
{
- return insert(before->index, executionCount);
+ return insert(before->index);
}
bool BlockInsertionSet::execute()
diff --git a/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h b/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h
index 0bdee20cc..aa2cdc57b 100644
--- a/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h
+++ b/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGBlockInsertionSet_h
#define DFGBlockInsertionSet_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
@@ -38,13 +40,13 @@ typedef WTF::Insertion<RefPtr<BasicBlock>> BlockInsertion;
class BlockInsertionSet {
public:
- BlockInsertionSet(Graph&);
+ BlockInsertionSet(Graph& graph);
~BlockInsertionSet();
- void insert(const BlockInsertion&);
- void insert(size_t index, PassRefPtr<BasicBlock>);
- BasicBlock* insert(size_t index, float executionCount);
- BasicBlock* insertBefore(BasicBlock* before, float executionCount);
+ void insert(const BlockInsertion& insertion);
+ void insert(size_t index, PassRefPtr<BasicBlock> block);
+ BasicBlock* insert(size_t index);
+ BasicBlock* insertBefore(BasicBlock* before);
bool execute();
diff --git a/Source/JavaScriptCore/dfg/DFGBlockMap.h b/Source/JavaScriptCore/dfg/DFGBlockMap.h
deleted file mode 100644
index a52546a0d..000000000
--- a/Source/JavaScriptCore/dfg/DFGBlockMap.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGBlockMap_h
-#define DFGBlockMap_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBasicBlock.h"
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-template<typename T>
-class BlockMap {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- BlockMap()
- {
- }
-
- BlockMap(Graph&);
-
- BlockIndex size() const
- {
- return m_vector.size();
- }
-
- T& atIndex(BlockIndex blockIndex)
- {
- return m_vector[blockIndex];
- }
-
- const T& atIndex(BlockIndex blockIndex) const
- {
- return m_vector[blockIndex];
- }
-
- T& at(BlockIndex blockIndex)
- {
- return m_vector[blockIndex];
- }
-
- const T& at(BlockIndex blockIndex) const
- {
- return m_vector[blockIndex];
- }
-
- T& at(BasicBlock* block)
- {
- return m_vector[block->index];
- }
-
- const T& at(BasicBlock* block) const
- {
- return m_vector[block->index];
- }
-
- T& operator[](BlockIndex blockIndex)
- {
- return m_vector[blockIndex];
- }
-
- const T& operator[](BlockIndex blockIndex) const
- {
- return m_vector[blockIndex];
- }
-
- T& operator[](BasicBlock* block)
- {
- return m_vector[block->index];
- }
-
- const T& operator[](BasicBlock* block) const
- {
- return m_vector[block->index];
- }
-
-private:
- Vector<T> m_vector;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGBlockMap_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGBlockMapInlines.h b/Source/JavaScriptCore/dfg/DFGBlockMapInlines.h
deleted file mode 100644
index e61626d80..000000000
--- a/Source/JavaScriptCore/dfg/DFGBlockMapInlines.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGBlockMapInlines_h
-#define DFGBlockMapInlines_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBlockMap.h"
-#include "DFGGraph.h"
-
-namespace JSC { namespace DFG {
-
-template<typename T>
-BlockMap<T>::BlockMap(Graph& graph)
-{
- m_vector.resize(graph.numBlocks());
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGBlockMapInlines_h
diff --git a/Source/JavaScriptCore/dfg/DFGBlockSet.cpp b/Source/JavaScriptCore/dfg/DFGBlockSet.cpp
deleted file mode 100644
index 790e380db..000000000
--- a/Source/JavaScriptCore/dfg/DFGBlockSet.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGBlockSet.h"
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-void BlockSet::dump(PrintStream& out) const
-{
- CommaPrinter comma(" ");
- for (BlockIndex blockIndex = m_set.findBit(0, true); blockIndex < m_set.size(); blockIndex = m_set.findBit(blockIndex + 1, true))
- out.print(comma, "#", blockIndex);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGBlockSet.h b/Source/JavaScriptCore/dfg/DFGBlockSet.h
deleted file mode 100644
index b09afecd9..000000000
--- a/Source/JavaScriptCore/dfg/DFGBlockSet.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGBlockSet_h
-#define DFGBlockSet_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBasicBlock.h"
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-class BlockSet {
-public:
- BlockSet() { }
-
- // Return true if the block was added, false if it was already present.
- bool add(BasicBlock* block)
- {
- return !m_set.set(block->index);
- }
-
- bool contains(BasicBlock* block) const
- {
- if (!block)
- return false;
- return m_set.get(block->index);
- }
-
- class iterator {
- public:
- iterator()
- : m_graph(nullptr)
- , m_set(nullptr)
- , m_index(0)
- {
- }
-
- iterator& operator++()
- {
- m_index = m_set->m_set.findBit(m_index + 1, true);
- return *this;
- }
-
- BasicBlock* operator*() const;
-
- bool operator==(const iterator& other) const
- {
- return m_index == other.m_index;
- }
-
- bool operator!=(const iterator& other) const
- {
- return !(*this == other);
- }
-
- private:
- friend class BlockSet;
-
- Graph* m_graph;
- const BlockSet* m_set;
- size_t m_index;
- };
-
- class Iterable {
- public:
- Iterable(Graph& graph, const BlockSet& set)
- : m_graph(graph)
- , m_set(set)
- {
- }
-
- iterator begin() const
- {
- iterator result;
- result.m_graph = &m_graph;
- result.m_set = &m_set;
- result.m_index = m_set.m_set.findBit(0, true);
- return result;
- }
-
- iterator end() const
- {
- iterator result;
- result.m_graph = &m_graph;
- result.m_set = &m_set;
- result.m_index = m_set.m_set.size();
- return result;
- }
-
- private:
- Graph& m_graph;
- const BlockSet& m_set;
- };
-
- Iterable iterable(Graph& graph) const
- {
- return Iterable(graph, *this);
- }
-
- void dump(PrintStream&) const;
-
-private:
- BitVector m_set;
-};
-
-class BlockAdder {
-public:
- BlockAdder(BlockSet& set)
- : m_set(set)
- {
- }
-
- bool operator()(BasicBlock* block) const
- {
- return m_set.add(block);
- }
-private:
- BlockSet& m_set;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGBlockSet_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGBlockSetInlines.h b/Source/JavaScriptCore/dfg/DFGBlockSetInlines.h
deleted file mode 100644
index df9628597..000000000
--- a/Source/JavaScriptCore/dfg/DFGBlockSetInlines.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGBlockSetInlines_h
-#define DFGBlockSetInlines_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBlockSet.h"
-#include "DFGGraph.h"
-
-namespace JSC { namespace DFG {
-
-inline BasicBlock* BlockSet::iterator::operator*() const
-{
- return m_graph->block(m_index);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGBlockSetInlines_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGBlockWorklist.cpp b/Source/JavaScriptCore/dfg/DFGBlockWorklist.cpp
deleted file mode 100644
index 1caf9ca1c..000000000
--- a/Source/JavaScriptCore/dfg/DFGBlockWorklist.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGBlockWorklist.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBasicBlock.h"
-
-namespace JSC { namespace DFG {
-
-BlockWorklist::BlockWorklist()
-{
-}
-
-BlockWorklist::~BlockWorklist()
-{
-}
-
-bool BlockWorklist::push(BasicBlock* block)
-{
- if (!m_seen.add(block))
- return false;
-
- m_stack.append(block);
- return true;
-}
-
-BasicBlock* BlockWorklist::pop()
-{
- if (m_stack.isEmpty())
- return nullptr;
-
- return m_stack.takeLast();
-}
-
-PostOrderBlockWorklist::PostOrderBlockWorklist()
-{
-}
-
-PostOrderBlockWorklist::~PostOrderBlockWorklist()
-{
-}
-
-bool PostOrderBlockWorklist::pushPre(BasicBlock* block)
-{
- return m_worklist.push(block, PreOrder);
-}
-
-void PostOrderBlockWorklist::pushPost(BasicBlock* block)
-{
- m_worklist.forcePush(block, PostOrder);
-}
-
-BlockWithOrder PostOrderBlockWorklist::pop()
-{
- BlockWith<VisitOrder> result = m_worklist.pop();
- return BlockWithOrder(result.block, result.data);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGBlockWorklist.h b/Source/JavaScriptCore/dfg/DFGBlockWorklist.h
deleted file mode 100644
index 5b39e1cbd..000000000
--- a/Source/JavaScriptCore/dfg/DFGBlockWorklist.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGBlockWorklist_h
-#define DFGBlockWorklist_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBasicBlock.h"
-#include "DFGBlockSet.h"
-#include <wtf/Vector.h>
-
-namespace JSC { namespace DFG {
-
-struct BasicBlock;
-
-class BlockWorklist {
-public:
- BlockWorklist();
- ~BlockWorklist();
-
- bool push(BasicBlock*); // Returns true if we didn't know about the block before.
-
- bool notEmpty() const { return !m_stack.isEmpty(); }
- BasicBlock* pop();
-
-private:
- BlockSet m_seen;
- Vector<BasicBlock*, 16> m_stack;
-};
-
-// When you say BlockWith<int> you should read it as "block with an int".
-template<typename T>
-struct BlockWith {
- BlockWith()
- : block(nullptr)
- {
- }
-
- BlockWith(BasicBlock* block, const T& data)
- : block(block)
- , data(data)
- {
- }
-
- explicit operator bool() const { return block; }
-
- BasicBlock* block;
- T data;
-};
-
-// Extended block worklist is useful for enqueueing some meta-data along with the block. It also
-// permits forcibly enqueueing things even if the block has already been seen. It's useful for
-// things like building a spanning tree, in which case T (the auxiliary payload) would be the
-// successor index.
-template<typename T>
-class ExtendedBlockWorklist {
-public:
- ExtendedBlockWorklist() { }
-
- void forcePush(const BlockWith<T>& entry)
- {
- m_stack.append(entry);
- }
-
- void forcePush(BasicBlock* block, const T& data)
- {
- forcePush(BlockWith<T>(block, data));
- }
-
- bool push(const BlockWith<T>& entry)
- {
- if (!m_seen.add(entry.block))
- return false;
-
- forcePush(entry);
- return true;
- }
-
- bool push(BasicBlock* block, const T& data)
- {
- return push(BlockWith<T>(block, data));
- }
-
- bool notEmpty() const { return !m_stack.isEmpty(); }
-
- BlockWith<T> pop()
- {
- if (m_stack.isEmpty())
- return BlockWith<T>();
-
- return m_stack.takeLast();
- }
-
-private:
- BlockSet m_seen;
- Vector<BlockWith<T>> m_stack;
-};
-
-enum VisitOrder {
- PreOrder,
- PostOrder
-};
-
-struct BlockWithOrder {
- BlockWithOrder()
- : block(nullptr)
- , order(PreOrder)
- {
- }
-
- BlockWithOrder(BasicBlock* block, VisitOrder order)
- : block(block)
- , order(order)
- {
- }
-
- explicit operator bool() const { return block; }
-
- BasicBlock* block;
- VisitOrder order;
-};
-
-// Block worklist suitable for post-order traversal.
-class PostOrderBlockWorklist {
-public:
- PostOrderBlockWorklist();
- ~PostOrderBlockWorklist();
-
- bool pushPre(BasicBlock*);
- void pushPost(BasicBlock*);
-
- bool push(BasicBlock* block, VisitOrder order = PreOrder)
- {
- switch (order) {
- case PreOrder:
- return pushPre(block);
- case PostOrder:
- pushPost(block);
- return true;
- }
- RELEASE_ASSERT_NOT_REACHED();
- return false;
- }
- bool push(const BlockWithOrder& data)
- {
- return push(data.block, data.order);
- }
-
- bool notEmpty() const { return m_worklist.notEmpty(); }
- BlockWithOrder pop();
-
-private:
- ExtendedBlockWorklist<VisitOrder> m_worklist;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGBlockWorklist_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGBranchDirection.h b/Source/JavaScriptCore/dfg/DFGBranchDirection.h
index dcdde27f8..8bbe3c635 100644
--- a/Source/JavaScriptCore/dfg/DFGBranchDirection.h
+++ b/Source/JavaScriptCore/dfg/DFGBranchDirection.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGBranchDirection_h
#define DFGBranchDirection_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
@@ -50,7 +52,7 @@ static inline const char* branchDirectionToString(BranchDirection branchDirectio
{
switch (branchDirection) {
case InvalidBranchDirection:
- return "InvalidBranchDirection";
+ return "Invalid";
case TakeTrue:
return "TakeTrue";
case TakeFalse:
@@ -58,9 +60,6 @@ static inline const char* branchDirectionToString(BranchDirection branchDirectio
case TakeBoth:
return "TakeBoth";
}
-
- RELEASE_ASSERT_NOT_REACHED();
- return "InvalidBranchDirection";
}
static inline bool isKnownDirection(BranchDirection branchDirection)
@@ -84,15 +83,6 @@ static inline bool branchCondition(BranchDirection branchDirection)
} } // namespace JSC::DFG
-namespace WTF {
-
-inline void printInternal(PrintStream& out, JSC::DFG::BranchDirection direction)
-{
- out.print(JSC::DFG::branchDirectionToString(direction));
-}
-
-} // namespace WTF
-
#endif // ENABLE(DFG_JIT)
#endif // DFGBranchDirection_h
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index c4e0bc583..c572e7a3e 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -1,5 +1,5 @@
-/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ /*
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,23 +29,18 @@
#if ENABLE(DFG_JIT)
#include "ArrayConstructor.h"
-#include "BasicBlockLocation.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGArrayMode.h"
#include "DFGCapabilities.h"
-#include "DFGGraph.h"
#include "DFGJITCode.h"
#include "GetByIdStatus.h"
-#include "Heap.h"
-#include "JSLexicalEnvironment.h"
-#include "JSCInlines.h"
+#include "JSActivation.h"
+#include "Operations.h"
#include "PreciseJumpTargets.h"
#include "PutByIdStatus.h"
-#include "StackAlignment.h"
#include "StringConstructor.h"
-#include "Watchdog.h"
#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
@@ -53,8 +48,6 @@
namespace JSC { namespace DFG {
-static const bool verbose = false;
-
class ConstantBufferKey {
public:
ConstantBufferKey()
@@ -137,17 +130,19 @@ public:
, m_graph(graph)
, m_currentBlock(0)
, m_currentIndex(0)
- , m_constantUndefined(graph.freeze(jsUndefined()))
- , m_constantNull(graph.freeze(jsNull()))
- , m_constantNaN(graph.freeze(jsNumber(PNaN)))
- , m_constantOne(graph.freeze(jsNumber(1)))
+ , m_constantUndefined(UINT_MAX)
+ , m_constantNull(UINT_MAX)
+ , m_constantNaN(UINT_MAX)
+ , m_constant1(UINT_MAX)
+ , m_constants(m_codeBlock->numberOfConstantRegisters())
, m_numArguments(m_codeBlock->numParameters())
, m_numLocals(m_codeBlock->m_numCalleeRegisters)
, m_parameterSlots(0)
, m_numPassedVarArgs(0)
, m_inlineStackTop(0)
+ , m_haveBuiltOperandMaps(false)
+ , m_emptyJSValueIndex(UINT_MAX)
, m_currentInstruction(0)
- , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
{
ASSERT(m_profiledBlock);
}
@@ -160,126 +155,56 @@ private:
// Just parse from m_currentIndex to the end of the current CodeBlock.
void parseCodeBlock();
-
- void ensureLocals(unsigned newNumLocals)
- {
- if (newNumLocals <= m_numLocals)
- return;
- m_numLocals = newNumLocals;
- for (size_t i = 0; i < m_graph.numBlocks(); ++i)
- m_graph.block(i)->ensureLocals(newNumLocals);
- }
// Helper for min and max.
- template<typename ChecksFunctor>
- bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
+ bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
// Handle calls. This resolves issues surrounding inlining and intrinsics.
- void handleCall(
- int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
- Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
- SpeculatedType prediction);
- void handleCall(
- int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
- Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
- void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
- void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
- void handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind);
- void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt);
- void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
- unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
+ void handleCall(Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
+ void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
+ void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
// Handle inlining. Return true if it succeeded, false if we need to plant a call.
- bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
- enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
- template<typename ChecksFunctor>
- bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
- template<typename ChecksFunctor>
- void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
- void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
+ bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
// Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
- template<typename ChecksFunctor>
- bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
- template<typename ChecksFunctor>
- bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
- template<typename ChecksFunctor>
- bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks);
+ bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
+ bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
+ bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
- Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
- Node* handleGetByOffset(SpeculatedType, Node* base, UniquedStringImpl*, PropertyOffset, NodeType = GetByOffset);
-
- // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
- // check the validity of the condition, but it may return a null one if it encounters a contradiction.
- ObjectPropertyCondition presenceLike(
- JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
-
- // Attempt to watch the presence of a property. It will watch that the property is present in the same
- // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
- // Returns true if this all works out.
- bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
- void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
-
- // Works with both GetByIdVariant and the setter form of PutByIdVariant.
- template<typename VariantType>
- Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
-
- Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
-
+ Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset);
+ void handleGetByOffset(
+ int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
+ PropertyOffset);
void handleGetById(
int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
const GetByIdStatus&);
- void emitPutById(
- Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
- void handlePutById(
- Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
- bool isDirect);
-
- // Either register a watchpoint or emit a check for this condition. Returns false if the
- // condition no longer holds, and therefore no reasonable check can be emitted.
- bool check(const ObjectPropertyCondition&);
-
- GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
-
- // Either register a watchpoint or emit a check for this condition. It must be a Presence
- // condition. It will attempt to promote a Presence condition to an Equivalence condition.
- // Emits code for the loaded value that the condition guards, and returns a node containing
- // the loaded value. Returns null if the condition no longer holds.
- GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
- Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
- Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
-
- // Calls check() for each condition in the set: that is, it either emits checks or registers
- // watchpoints (or a combination of the two) to make the conditions hold. If any of those
- // conditions are no longer checkable, returns false.
- bool check(const ObjectPropertyConditionSet&);
-
- // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
- // base. Does a combination of watchpoint registration and check emission to guard the
- // conditions, and emits code to load the value from the slot base. Returns a node containing
- // the loaded value. Returns null if any of the conditions were no longer checkable.
- GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
- Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
+ Node* getScope(bool skipTop, unsigned skipCount);
+
+ // Prepare to parse a block.
void prepareToParseBlock();
- void clearCaches();
-
// Parse a single basic block of bytecode instructions.
bool parseBlock(unsigned limit);
// Link block successors.
void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
- VariableAccessData* newVariableAccessData(VirtualRegister operand)
+ VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
{
ASSERT(!operand.isConstant());
- m_graph.m_variableAccessData.append(VariableAccessData(operand));
+ m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
return &m_graph.m_variableAccessData.last();
}
// Get/Set the operands/result of a bytecode instruction.
Node* getDirect(VirtualRegister operand)
{
- ASSERT(!operand.isConstant());
+ // Is this a constant?
+ if (operand.isConstant()) {
+ unsigned constant = operand.toConstantIndex();
+ ASSERT(constant < m_constants.size());
+ return getJSConstant(constant);
+ }
// Is this an argument?
if (operand.isArgument())
@@ -291,78 +216,28 @@ private:
Node* get(VirtualRegister operand)
{
- if (operand.isConstant()) {
- unsigned constantIndex = operand.toConstantIndex();
- unsigned oldSize = m_constants.size();
- if (constantIndex >= oldSize || !m_constants[constantIndex]) {
- const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
- JSValue value = codeBlock.getConstant(operand.offset());
- SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
- if (constantIndex >= oldSize) {
- m_constants.grow(constantIndex + 1);
- for (unsigned i = oldSize; i < m_constants.size(); ++i)
- m_constants[i] = nullptr;
- }
-
- Node* constantNode = nullptr;
- if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
- constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
- else
- constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
- m_constants[constantIndex] = constantNode;
- }
- ASSERT(m_constants[constantIndex]);
- return m_constants[constantIndex];
- }
-
if (inlineCallFrame()) {
if (!inlineCallFrame()->isClosureCall) {
JSFunction* callee = inlineCallFrame()->calleeConstant();
if (operand.offset() == JSStack::Callee)
- return weakJSConstant(callee);
- }
- } else if (operand.offset() == JSStack::Callee) {
- // We have to do some constant-folding here because this enables CreateThis folding. Note
- // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
- // case if the function is a singleton then we already know it.
- if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) {
- InferredValue* singleton = executable->singletonFunction();
- if (JSValue value = singleton->inferredValue()) {
- m_graph.watchpoints().addLazily(singleton);
- JSFunction* function = jsCast<JSFunction*>(value);
- return weakJSConstant(function);
- }
+ return cellConstant(callee);
+ if (operand.offset() == JSStack::ScopeChain)
+ return cellConstant(callee->scope());
}
+ } else if (operand.offset() == JSStack::Callee)
return addToGraph(GetCallee);
- }
+ else if (operand.offset() == JSStack::ScopeChain)
+ return addToGraph(GetMyScope);
return getDirect(m_inlineStackTop->remapOperand(operand));
}
- enum SetMode {
- // A normal set which follows a two-phase commit that spans code origins. During
- // the current code origin it issues a MovHint, and at the start of the next
- // code origin there will be a SetLocal. If the local needs flushing, the second
- // SetLocal will be preceded with a Flush.
- NormalSet,
-
- // A set where the SetLocal happens immediately and there is still a Flush. This
- // is relevant when assigning to a local in tricky situations for the delayed
- // SetLocal logic but where we know that we have not performed any side effects
- // within this code origin. This is a safe replacement for NormalSet anytime we
- // know that we have not yet performed side effects in this code origin.
- ImmediateSetWithFlush,
-
- // A set where the SetLocal happens immediately and we do not Flush it even if
- // this is a local that is marked as needing it. This is relevant when
- // initializing locals at the top of a function.
- ImmediateNakedSet
- };
+ enum SetMode { NormalSet, ImmediateSet };
Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
addToGraph(MovHint, OpInfo(operand.offset()), value);
-
- DelayedSetLocal delayed(currentCodeOrigin(), operand, value);
+
+ DelayedSetLocal delayed = DelayedSetLocal(operand, value);
if (setMode == NormalSet) {
m_setLocalQueue.append(delayed);
@@ -371,13 +246,6 @@ private:
return delayed.execute(this, setMode);
}
-
- void processSetLocalQueue()
- {
- for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
- m_setLocalQueue[i].execute(this);
- m_setLocalQueue.resize(0);
- }
Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
@@ -387,7 +255,7 @@ private:
Node* injectLazyOperandSpeculation(Node* node)
{
ASSERT(node->op() == GetLocal);
- ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
+ ASSERT(node->codeOrigin.bytecodeIndex == m_currentIndex);
ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
LazyOperandValueProfileKey key(m_currentIndex, node->local());
SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
@@ -400,7 +268,25 @@ private:
{
unsigned local = operand.toLocal();
+ if (local < m_localWatchpoints.size()) {
+ if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
+ if (JSValue value = set->inferredValue()) {
+ addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
+ addToGraph(VariableWatchpoint, OpInfo(set));
+ // Note: this is very special from an OSR exit standpoint. We wouldn't be
+ // able to do this for most locals, but it works here because we're dealing
+ // with a flushed local. For most locals we would need to issue a GetLocal
+ // here and ensure that we have uses in DFG IR wherever there would have
+ // been uses in bytecode. Clearly this optimization does not do this. But
+ // that's fine, because we don't need to track liveness for captured
+ // locals, and this optimization only kicks in for captured locals.
+ return inferredConstant(value);
+ }
+ }
+ }
+
Node* node = m_currentBlock->variablesAtTail.local(local);
+ bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
// This has two goals: 1) link together variable access datas, and 2)
// try to avoid creating redundant GetLocals. (1) is required for
@@ -412,47 +298,45 @@ private:
if (node) {
variable = node->variableAccessData();
+ variable->mergeIsCaptured(isCaptured);
- switch (node->op()) {
- case GetLocal:
- return node;
- case SetLocal:
- return node->child1().node();
- default:
- break;
+ if (!isCaptured) {
+ switch (node->op()) {
+ case GetLocal:
+ return node;
+ case SetLocal:
+ return node->child1().node();
+ default:
+ break;
+ }
}
} else
- variable = newVariableAccessData(operand);
+ variable = newVariableAccessData(operand, isCaptured);
node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
m_currentBlock->variablesAtTail.local(local) = node;
return node;
}
- Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
+ Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
- CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
- m_currentSemanticOrigin = semanticOrigin;
-
unsigned local = operand.toLocal();
+ bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
- if (setMode != ImmediateNakedSet) {
+ if (setMode == NormalSet) {
ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
- if (argumentPosition)
+ if (isCaptured || argumentPosition)
flushDirect(operand, argumentPosition);
- else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister())
- flush(operand);
}
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
variableAccessData->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
variableAccessData->mergeCheckArrayHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
m_currentBlock->variablesAtTail.local(local) = node;
-
- m_currentSemanticOrigin = oldSemanticOrigin;
return node;
}
@@ -463,11 +347,13 @@ private:
ASSERT(argument < m_numArguments);
Node* node = m_currentBlock->variablesAtTail.argument(argument);
+ bool isCaptured = m_codeBlock->isCaptured(operand);
VariableAccessData* variable;
if (node) {
variable = node->variableAccessData();
+ variable->mergeIsCaptured(isCaptured);
switch (node->op()) {
case GetLocal:
@@ -478,38 +364,36 @@ private:
break;
}
} else
- variable = newVariableAccessData(operand);
+ variable = newVariableAccessData(operand, isCaptured);
node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
m_currentBlock->variablesAtTail.argument(argument) = node;
return node;
}
- Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
+ Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
- CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
- m_currentSemanticOrigin = semanticOrigin;
-
unsigned argument = operand.toArgument();
ASSERT(argument < m_numArguments);
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ bool isCaptured = m_codeBlock->isCaptured(operand);
+
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
// Always flush arguments, except for 'this'. If 'this' is created by us,
// then make sure that it's never unboxed.
if (argument) {
- if (setMode != ImmediateNakedSet)
+ if (setMode == NormalSet)
flushDirect(operand);
} else if (m_codeBlock->specializationKind() == CodeForConstruct)
variableAccessData->mergeShouldNeverUnbox(true);
variableAccessData->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
variableAccessData->mergeCheckArrayHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
m_currentBlock->variablesAtTail.argument(argument) = node;
-
- m_currentSemanticOrigin = oldSemanticOrigin;
return node;
}
@@ -546,6 +430,18 @@ private:
return findArgumentPositionForLocal(operand);
}
+ void addConstant(JSValue value)
+ {
+ unsigned constantIndex = m_codeBlock->addConstantLazily();
+ initializeLazyWriteBarrierForConstant(
+ m_graph.m_plan.writeBarriers,
+ m_codeBlock->constants()[constantIndex],
+ m_codeBlock,
+ constantIndex,
+ m_codeBlock->ownerExecutable(),
+ value);
+ }
+
void flush(VirtualRegister operand)
{
flushDirect(m_inlineStackTop->remapOperand(operand));
@@ -558,74 +454,84 @@ private:
void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
{
+ bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
+
ASSERT(!operand.isConstant());
Node* node = m_currentBlock->variablesAtTail.operand(operand);
VariableAccessData* variable;
- if (node)
+ if (node) {
variable = node->variableAccessData();
- else
- variable = newVariableAccessData(operand);
+ variable->mergeIsCaptured(isCaptured);
+ } else
+ variable = newVariableAccessData(operand, isCaptured);
node = addToGraph(Flush, OpInfo(variable));
m_currentBlock->variablesAtTail.operand(operand) = node;
if (argumentPosition)
argumentPosition->addVariable(variable);
}
-
+
void flush(InlineStackEntry* inlineStackEntry)
{
int numArguments;
if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
- ASSERT(!m_hasDebuggerEnabled);
numArguments = inlineCallFrame->arguments.size();
- if (inlineCallFrame->isClosureCall)
+ if (inlineCallFrame->isClosureCall) {
flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
- if (inlineCallFrame->isVarargs())
- flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount)));
+ flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain)));
+ }
} else
numArguments = inlineStackEntry->m_codeBlock->numParameters();
for (unsigned argument = numArguments; argument-- > 1;)
flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
- if (m_hasDebuggerEnabled)
- flush(m_codeBlock->scopeRegister());
+ for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
+ if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
+ continue;
+ flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
+ }
}
- void flushForTerminal()
+ void flushAllArgumentsAndCapturedVariablesInInlineStack()
{
for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
flush(inlineStackEntry);
}
- void flushForReturn()
+ void flushArgumentsAndCapturedVariables()
{
flush(m_inlineStackTop);
}
-
- void flushIfTerminal(SwitchData& data)
+
+ // NOTE: Only use this to construct constants that arise from non-speculative
+ // constant folding. I.e. creating constants using this if we had constant
+ // field inference would be a bad idea, since the bytecode parser's folding
+ // doesn't handle liveness preservation.
+ Node* getJSConstantForValue(JSValue constantValue, NodeFlags flags = NodeIsStaticConstant)
{
- if (data.fallThrough.bytecodeIndex() > m_currentIndex)
- return;
-
- for (unsigned i = data.cases.size(); i--;) {
- if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
- return;
+ unsigned constantIndex;
+ if (!m_codeBlock->findConstant(constantValue, constantIndex)) {
+ addConstant(constantValue);
+ m_constants.append(ConstantRecord());
}
- flushForTerminal();
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+
+ return getJSConstant(constantIndex, flags);
}
- // Assumes that the constant should be strongly marked.
- Node* jsConstant(JSValue constantValue)
+ Node* getJSConstant(unsigned constant, NodeFlags flags = NodeIsStaticConstant)
{
- return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
- }
+ Node* node = m_constants[constant].asJSValue;
+ if (node)
+ return node;
- Node* weakJSConstant(JSValue constantValue)
- {
- return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
+ Node* result = addToGraph(JSConstant, OpInfo(constant));
+ result->mergeFlags(flags);
+ m_constants[constant].asJSValue = result;
+ return result;
}
// Helper functions to get/set the this value.
@@ -639,126 +545,265 @@ private:
set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
}
- InlineCallFrame* inlineCallFrame()
+ // Convenience methods for checking nodes for constants.
+ bool isJSConstant(Node* node)
{
- return m_inlineStackTop->m_inlineCallFrame;
+ return node->op() == JSConstant;
+ }
+ bool isInt32Constant(Node* node)
+ {
+ return isJSConstant(node) && valueOfJSConstant(node).isInt32();
+ }
+ // Convenience methods for getting constant values.
+ JSValue valueOfJSConstant(Node* node)
+ {
+ ASSERT(isJSConstant(node));
+ return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber());
+ }
+ int32_t valueOfInt32Constant(Node* node)
+ {
+ ASSERT(isInt32Constant(node));
+ return valueOfJSConstant(node).asInt32();
}
+
+ // This method returns a JSConstant with the value 'undefined'.
+ Node* constantUndefined()
+ {
+ // Has m_constantUndefined been set up yet?
+ if (m_constantUndefined == UINT_MAX) {
+ // Search the constant pool for undefined, if we find it, we can just reuse this!
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
+ for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
+ JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
+ if (testMe.isUndefined())
+ return getJSConstant(m_constantUndefined);
+ }
- CodeOrigin currentCodeOrigin()
+ // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
+ ASSERT(m_constants.size() == numberOfConstants);
+ addConstant(jsUndefined());
+ m_constants.append(ConstantRecord());
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ }
+
+ // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
+ return getJSConstant(m_constantUndefined);
+ }
+
+ // This method returns a JSConstant with the value 'null'.
+ Node* constantNull()
{
- return CodeOrigin(m_currentIndex, inlineCallFrame());
+ // Has m_constantNull been set up yet?
+ if (m_constantNull == UINT_MAX) {
+ // Search the constant pool for null, if we find it, we can just reuse this!
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
+ for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
+ JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
+ if (testMe.isNull())
+ return getJSConstant(m_constantNull);
+ }
+
+ // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
+ ASSERT(m_constants.size() == numberOfConstants);
+ addConstant(jsNull());
+ m_constants.append(ConstantRecord());
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ }
+
+ // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
+ return getJSConstant(m_constantNull);
}
- NodeOrigin currentNodeOrigin()
+ // This method returns a DoubleConstant with the value 1.
+ Node* one()
{
- // FIXME: We should set the forExit origin only on those nodes that can exit.
- // https://bugs.webkit.org/show_bug.cgi?id=145204
- if (m_currentSemanticOrigin.isSet())
- return NodeOrigin(m_currentSemanticOrigin, currentCodeOrigin());
- return NodeOrigin(currentCodeOrigin());
+ // Has m_constant1 been set up yet?
+ if (m_constant1 == UINT_MAX) {
+ // Search the constant pool for the value 1, if we find it, we can just reuse this!
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
+ for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
+ JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
+ if (testMe.isInt32() && testMe.asInt32() == 1)
+ return getJSConstant(m_constant1);
+ }
+
+ // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
+ ASSERT(m_constants.size() == numberOfConstants);
+ addConstant(jsNumber(1));
+ m_constants.append(ConstantRecord());
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ }
+
+ // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
+ return getJSConstant(m_constant1);
+ }
+
+ // This method returns a DoubleConstant with the value NaN.
+ Node* constantNaN()
+ {
+ JSValue nan = jsNaN();
+
+ // Has m_constantNaN been set up yet?
+ if (m_constantNaN == UINT_MAX) {
+ // Search the constant pool for the value NaN, if we find it, we can just reuse this!
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
+ for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
+ JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
+ if (JSValue::encode(testMe) == JSValue::encode(nan))
+ return getJSConstant(m_constantNaN);
+ }
+
+ // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
+ ASSERT(m_constants.size() == numberOfConstants);
+ addConstant(nan);
+ m_constants.append(ConstantRecord());
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ }
+
+ // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
+ ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
+ return getJSConstant(m_constantNaN);
}
- BranchData* branchData(unsigned taken, unsigned notTaken)
+ Node* cellConstant(JSCell* cell)
{
- // We assume that branches originating from bytecode always have a fall-through. We
- // use this assumption to avoid checking for the creation of terminal blocks.
- ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
- BranchData* data = m_graph.m_branchData.add();
- *data = BranchData::withBytecodeIndices(taken, notTaken);
- return data;
+ HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, nullptr);
+ if (result.isNewEntry)
+ result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
+
+ return result.iterator->value;
}
- Node* addToGraph(Node* node)
+ Node* inferredConstant(JSValue value)
{
- if (Options::verboseDFGByteCodeParsing())
- dataLog(" appended ", node, " ", Graph::opName(node->op()), "\n");
- m_currentBlock->append(node);
- return node;
+ if (value.isCell())
+ return cellConstant(value.asCell());
+ return getJSConstantForValue(value, 0);
+ }
+
+ InlineCallFrame* inlineCallFrame()
+ {
+ return m_inlineStackTop->m_inlineCallFrame;
+ }
+
+ CodeOrigin currentCodeOrigin()
+ {
+ return CodeOrigin(m_currentIndex, inlineCallFrame());
+ }
+
+ bool canFold(Node* node)
+ {
+ if (Options::validateFTLOSRExitLiveness()) {
+ // The static folding that the bytecode parser does results in the DFG
+ // being able to do some DCE that the bytecode liveness analysis would
+ // miss. Hence, we disable the static folding if we're validating FTL OSR
+ // exit liveness. This may be brutish, but this validator is powerful
+ // enough that it's worth it.
+ return false;
+ }
+
+ return node->isStronglyProvedConstantIn(inlineCallFrame());
+ }
+
+ // Our codegen for constant strict equality performs a bitwise comparison,
+ // so we can only select values that have a consistent bitwise identity.
+ bool isConstantForCompareStrictEq(Node* node)
+ {
+ if (!node->isConstant())
+ return false;
+ JSValue value = valueOfJSConstant(node);
+ return value.isBoolean() || value.isUndefinedOrNull();
}
Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
{
Node* result = m_graph.addNode(
- SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2),
- Edge(child3));
- return addToGraph(result);
+ SpecNone, op, currentCodeOrigin(), Edge(child1), Edge(child2), Edge(child3));
+ ASSERT(op != Phi);
+ m_currentBlock->append(result);
+ return result;
}
Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
{
Node* result = m_graph.addNode(
- SpecNone, op, currentNodeOrigin(), child1, child2, child3);
- return addToGraph(result);
+ SpecNone, op, currentCodeOrigin(), child1, child2, child3);
+ ASSERT(op != Phi);
+ m_currentBlock->append(result);
+ return result;
}
Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
{
Node* result = m_graph.addNode(
- SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
- Edge(child3));
- return addToGraph(result);
+ SpecNone, op, currentCodeOrigin(), info, Edge(child1), Edge(child2), Edge(child3));
+ ASSERT(op != Phi);
+ m_currentBlock->append(result);
+ return result;
}
Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
{
Node* result = m_graph.addNode(
- SpecNone, op, currentNodeOrigin(), info1, info2,
+ SpecNone, op, currentCodeOrigin(), info1, info2,
Edge(child1), Edge(child2), Edge(child3));
- return addToGraph(result);
+ ASSERT(op != Phi);
+ m_currentBlock->append(result);
+ return result;
}
Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
{
Node* result = m_graph.addNode(
- SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2,
+ SpecNone, Node::VarArg, op, currentCodeOrigin(), info1, info2,
m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
- addToGraph(result);
+ ASSERT(op != Phi);
+ m_currentBlock->append(result);
m_numPassedVarArgs = 0;
return result;
}
-
+
void addVarArgChild(Node* child)
{
m_graph.m_varArgChildren.append(Edge(child));
m_numPassedVarArgs++;
}
- Node* addCallWithoutSettingResult(
- NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
- SpeculatedType prediction)
+ Node* addCall(Instruction* currentInstruction, NodeType op)
{
- addVarArgChild(callee);
- size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
- if (parameterSlots > m_parameterSlots)
- m_parameterSlots = parameterSlots;
+ SpeculatedType prediction = getPrediction();
+
+ addVarArgChild(get(VirtualRegister(currentInstruction[2].u.operand)));
+ int argCount = currentInstruction[3].u.operand;
+ if (JSStack::ThisArgument + (unsigned)argCount > m_parameterSlots)
+ m_parameterSlots = JSStack::ThisArgument + argCount;
- for (int i = 0; i < argCount; ++i)
+ int registerOffset = -currentInstruction[4].u.operand;
+ int dummyThisArgument = op == Call ? 0 : 1;
+ for (int i = 0 + dummyThisArgument; i < argCount; ++i)
addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
- return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
- }
-
- Node* addCall(
- int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
- SpeculatedType prediction)
- {
- Node* call = addCallWithoutSettingResult(
- op, opInfo, callee, argCount, registerOffset, prediction);
- VirtualRegister resultReg(result);
- if (resultReg.isValid())
- set(resultReg, call);
+ Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
+ set(VirtualRegister(currentInstruction[1].u.operand), call);
return call;
}
Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
{
- // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
- // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
- // object's structure as soon as we make it a weakJSCosntant.
- Node* objectNode = weakJSConstant(object);
+ Node* objectNode = cellConstant(object);
addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
return objectNode;
}
+ Node* cellConstantWithStructureCheck(JSCell* object)
+ {
+ return cellConstantWithStructureCheck(object, object->structure());
+ }
+
SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
{
ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
@@ -792,8 +837,7 @@ private:
{
ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
- bool makeSafe = profile->outOfBounds(locker);
- return ArrayMode::fromObserved(locker, profile, action, makeSafe);
+ return ArrayMode::fromObserved(locker, profile, action, false);
}
ArrayMode getArrayMode(ArrayProfile* profile)
@@ -801,17 +845,32 @@ private:
return getArrayMode(profile, Array::Read);
}
- Node* makeSafe(Node* node)
+ ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
{
- if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- node->mergeFlags(NodeMayOverflowInDFG);
- if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
- node->mergeFlags(NodeMayNegZeroInDFG);
+ ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
+
+ bool makeSafe =
+ m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ || profile->outOfBounds(locker);
+
+ ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
+
+ return result;
+ }
+
+ Node* makeSafe(Node* node)
+ {
+ bool likelyToTakeSlowCase;
if (!isX86() && node->op() == ArithMod)
- return node;
-
- if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
+ likelyToTakeSlowCase = false;
+ else
+ likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
+
+ if (!likelyToTakeSlowCase
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
return node;
switch (node->op()) {
@@ -820,27 +879,24 @@ private:
case ArithSub:
case ValueAdd:
case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
- node->mergeFlags(NodeMayOverflowInBaseline);
+ node->mergeFlags(NodeMayOverflow);
break;
case ArithNegate:
// Currently we can't tell the difference between a negation overflowing
// (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow
// path then we assume that it did both of those things.
- node->mergeFlags(NodeMayOverflowInBaseline);
- node->mergeFlags(NodeMayNegZeroInBaseline);
+ node->mergeFlags(NodeMayOverflow);
+ node->mergeFlags(NodeMayNegZero);
break;
case ArithMul:
- // FIXME: We should detect cases where we only overflowed but never created
- // negative zero.
- // https://bugs.webkit.org/show_bug.cgi?id=132470
if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
+ node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
- node->mergeFlags(NodeMayNegZeroInBaseline);
+ node->mergeFlags(NodeMayNegZero);
break;
default:
@@ -855,36 +911,42 @@ private:
{
ASSERT(node->op() == ArithDiv);
- if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- node->mergeFlags(NodeMayOverflowInDFG);
- if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
- node->mergeFlags(NodeMayNegZeroInDFG);
-
// The main slow case counter for op_div in the old JIT counts only when
// the operands are not numbers. We don't care about that since we already
// have speculations in place that take care of that separately. We only
// care about when the outcome of the division is not an integer, which
// is what the special fast case counter tells us.
- if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
+ if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
return node;
- // FIXME: It might be possible to make this more granular.
- node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
+ // FIXME: It might be possible to make this more granular. The DFG certainly can
+ // distinguish between negative zero and overflow in its exit profiles.
+ node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
return node;
}
- void noticeArgumentsUse()
+ bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
{
- // All of the arguments in this function need to be formatted as JSValues because we will
- // load from them in a random-access fashion and we don't want to have to switch on
- // format.
+ if (direct)
+ return true;
+
+ if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
+ return false;
+
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
+ if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
+ return false;
+ }
- for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
- argument->mergeShouldNeverUnbox(true);
+ return true;
}
+ void buildOperandMapsIfNecessary();
+
VM* m_vm;
CodeBlock* m_codeBlock;
CodeBlock* m_profiledBlock;
@@ -894,30 +956,54 @@ private:
BasicBlock* m_currentBlock;
// The bytecode index of the current instruction being generated.
unsigned m_currentIndex;
- // The semantic origin of the current node if different from the current Index.
- CodeOrigin m_currentSemanticOrigin;
- FrozenValue* m_constantUndefined;
- FrozenValue* m_constantNull;
- FrozenValue* m_constantNaN;
- FrozenValue* m_constantOne;
- Vector<Node*, 16> m_constants;
+ // We use these values during code generation, and to avoid the need for
+ // special handling we make sure they are available as constants in the
+ // CodeBlock's constant pool. These variables are initialized to
+ // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
+ // constant pool, as necessary.
+ unsigned m_constantUndefined;
+ unsigned m_constantNull;
+ unsigned m_constantNaN;
+ unsigned m_constant1;
+ HashMap<JSCell*, unsigned> m_cellConstants;
+ HashMap<JSCell*, Node*> m_cellConstantNodes;
+
+ // A constant in the constant pool may be represented by more than one
+ // node in the graph, depending on the context in which it is being used.
+ struct ConstantRecord {
+ ConstantRecord()
+ : asInt32(0)
+ , asNumeric(0)
+ , asJSValue(0)
+ {
+ }
+
+ Node* asInt32;
+ Node* asNumeric;
+ Node* asJSValue;
+ };
+
+ // Track the index of the node whose result is the current value for every
+ // register value in the bytecode - argument, local, and temporary.
+ Vector<ConstantRecord, 16> m_constants;
// The number of arguments passed to the function.
unsigned m_numArguments;
// The number of locals (vars + temporaries) used in the function.
unsigned m_numLocals;
// The number of slots (in units of sizeof(Register)) that we need to
- // preallocate for arguments to outgoing calls from this frame. This
- // number includes the CallFrame slots that we initialize for the callee
- // (but not the callee-initialized CallerFrame and ReturnPC slots).
- // This number is 0 if and only if this function is a leaf.
+ // preallocate for calls emanating from this frame. This includes the
+ // size of the CallFrame, only if this is not a leaf function. (I.e.
+ // this is 0 if and only if this function is a leaf.)
unsigned m_parameterSlots;
// The number of var args passed to the next var arg node.
unsigned m_numPassedVarArgs;
HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
+ Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
+
struct InlineStackEntry {
ByteCodeParser* m_byteCodeParser;
@@ -934,6 +1020,7 @@ private:
// (the machine code block, which is the transitive, though not necessarily
// direct, caller).
Vector<unsigned> m_identifierRemap;
+ Vector<unsigned> m_constantRemap;
Vector<unsigned> m_constantBufferRemap;
Vector<unsigned> m_switchRemap;
@@ -945,7 +1032,8 @@ private:
Vector<UnlinkedBlock> m_unlinkedBlocks;
// Potential block linking targets. Must be sorted by bytecodeBegin, and
- // cannot have two blocks that have the same bytecodeBegin.
+ // cannot have two blocks that have the same bytecodeBegin. For this very
+ // reason, this is not equivalent to
Vector<BasicBlock*> m_blockLinkingTargets;
// If the callsite's basic block was split into two, then this will be
@@ -967,9 +1055,7 @@ private:
// code block had gathered.
LazyOperandValueProfileParser m_lazyOperands;
- CallLinkInfoMap m_callLinkInfos;
StubInfoMap m_stubInfos;
- ByValInfoMap m_byValInfos;
// Did we see any returns? We need to handle the (uncommon but necessary)
// case where a procedure that does not return was inlined.
@@ -992,7 +1078,7 @@ private:
VirtualRegister returnValueVR,
VirtualRegister inlineCallFrameStart,
int argumentCountIncludingThis,
- InlineCallFrame::Kind);
+ CodeSpecializationKind);
~InlineStackEntry()
{
@@ -1004,7 +1090,11 @@ private:
if (!m_inlineCallFrame)
return operand;
- ASSERT(!operand.isConstant());
+ if (operand.isConstant()) {
+ VirtualRegister result = VirtualRegister(m_constantRemap[operand.toConstantIndex()]);
+ ASSERT(result.isConstant());
+ return result;
+ }
return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
}
@@ -1013,14 +1103,12 @@ private:
InlineStackEntry* m_inlineStackTop;
struct DelayedSetLocal {
- CodeOrigin m_origin;
VirtualRegister m_operand;
Node* m_value;
DelayedSetLocal() { }
- DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
- : m_origin(origin)
- , m_operand(operand)
+ DelayedSetLocal(VirtualRegister operand, Node* value)
+ : m_operand(operand)
, m_value(value)
{
}
@@ -1028,19 +1116,25 @@ private:
Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
{
if (m_operand.isArgument())
- return parser->setArgument(m_origin, m_operand, m_value, setMode);
- return parser->setLocal(m_origin, m_operand, m_value, setMode);
+ return parser->setArgument(m_operand, m_value, setMode);
+ return parser->setLocal(m_operand, m_value, setMode);
}
};
Vector<DelayedSetLocal, 2> m_setLocalQueue;
- CodeBlock* m_dfgCodeBlock;
- CallLinkStatus::ContextMap m_callContextMap;
- StubInfoMap m_dfgStubInfos;
+ // Have we built operand maps? We initialize them lazily, and only when doing
+ // inlining.
+ bool m_haveBuiltOperandMaps;
+ // Mapping between identifier names and numbers.
+ BorrowedIdentifierMap m_identifierMap;
+ // Mapping between values and constant numbers.
+ JSValueMap m_jsValueMap;
+ // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
+ // work-around for the fact that JSValueMap can't handle "empty" values.
+ unsigned m_emptyJSValueIndex;
Instruction* m_currentInstruction;
- bool m_hasDebuggerEnabled;
};
#define NEXT_OPCODE(name) \
@@ -1051,278 +1145,171 @@ private:
m_currentIndex += OPCODE_LENGTH(name); \
return shouldContinueParsing
-void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
-{
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
- handleCall(
- pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
- pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
-}
-void ByteCodeParser::handleCall(
- int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
- int callee, int argumentCountIncludingThis, int registerOffset)
+void ByteCodeParser::handleCall(Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
- Node* callTarget = get(VirtualRegister(callee));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
- CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
- m_inlineStackTop->m_callLinkInfos, m_callContextMap);
+ Node* callTarget = get(VirtualRegister(currentInstruction[2].u.operand));
- handleCall(
- result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
- argumentCountIncludingThis, registerOffset, callLinkStatus);
-}
-
-void ByteCodeParser::handleCall(
- int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
- Node* callTarget, int argumentCountIncludingThis, int registerOffset,
- CallLinkStatus callLinkStatus)
-{
- handleCall(
- result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
- registerOffset, callLinkStatus, getPrediction());
-}
+ CallLinkStatus callLinkStatus;
-void ByteCodeParser::handleCall(
- int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
- Node* callTarget, int argumentCountIncludingThis, int registerOffset,
- CallLinkStatus callLinkStatus, SpeculatedType prediction)
-{
- ASSERT(registerOffset <= 0);
-
- if (callTarget->isCellConstant())
- callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
-
- if (Options::verboseDFGByteCodeParsing())
- dataLog(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
+ if (m_graph.isConstant(callTarget))
+ callLinkStatus = CallLinkStatus(m_graph.valueOfJSConstant(callTarget)).setIsProved(true);
+ else {
+ callLinkStatus = CallLinkStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex);
+ callLinkStatus.setHasBadFunctionExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction));
+ callLinkStatus.setHasBadCacheExitSite(
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
+ callLinkStatus.setHasBadExecutableExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadExecutable));
+ }
if (!callLinkStatus.canOptimize()) {
// Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
// that we cannot optimize them.
- addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
+ addCall(currentInstruction, op);
return;
}
- unsigned nextOffset = m_currentIndex + instructionSize;
-
- OpInfo callOpInfo;
-
- if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
- if (m_graph.compilation())
- m_graph.compilation()->noticeInlinedCall();
- return;
- }
-
- addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
-}
+ int argumentCountIncludingThis = currentInstruction[3].u.operand;
+ int registerOffset = -currentInstruction[4].u.operand;
-void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
-{
- ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
-
- int result = pc[1].u.operand;
- int callee = pc[2].u.operand;
- int thisReg = pc[3].u.operand;
- int arguments = pc[4].u.operand;
- int firstFreeReg = pc[5].u.operand;
- int firstVarArgOffset = pc[6].u.operand;
-
+ int resultOperand = currentInstruction[1].u.operand;
+ unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
SpeculatedType prediction = getPrediction();
-
- Node* callTarget = get(VirtualRegister(callee));
-
- CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
- m_inlineStackTop->m_callLinkInfos, m_callContextMap);
- if (callTarget->isCellConstant())
- callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
-
- if (Options::verboseDFGByteCodeParsing())
- dataLog(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
-
- if (callLinkStatus.canOptimize()
- && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(kind), prediction)) {
+
+ if (InternalFunction* function = callLinkStatus.internalFunction()) {
+ if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, prediction, kind)) {
+ // This phantoming has to be *after* the code for the intrinsic, to signify that
+ // the inputs must be kept alive whatever exits the intrinsic may do.
+ addToGraph(Phantom, callTarget);
+ emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
+ return;
+ }
+
+ // Can only handle this using the generic call handler.
+ addCall(currentInstruction, op);
+ return;
+ }
+
+ Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind);
+ if (intrinsic != NoIntrinsic) {
+ emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind);
+
+ if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
+ // This phantoming has to be *after* the code for the intrinsic, to signify that
+ // the inputs must be kept alive whatever exits the intrinsic may do.
+ addToGraph(Phantom, callTarget);
+ emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedCall();
+ return;
+ }
+ } else if (handleInlining(callTarget, resultOperand, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
if (m_graph.compilation())
m_graph.compilation()->noticeInlinedCall();
return;
}
- CallVarargsData* data = m_graph.m_callVarargsData.add();
- data->firstVarArgOffset = firstVarArgOffset;
-
- Node* thisChild = get(VirtualRegister(thisReg));
-
- Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild);
- VirtualRegister resultReg(result);
- if (resultReg.isValid())
- set(resultReg, call);
+ addCall(currentInstruction, op);
}
-void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
+void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
{
Node* thisArgument;
- if (thisArgumentReg.isValid())
- thisArgument = get(thisArgumentReg);
+ if (kind == CodeForCall)
+ thisArgument = get(virtualRegisterForArgument(0, registerOffset));
else
thisArgument = 0;
- JSCell* calleeCell;
- Node* callTargetForCheck;
- if (callee.isClosureCall()) {
- calleeCell = callee.executable();
- callTargetForCheck = addToGraph(GetExecutable, callTarget);
- } else {
- calleeCell = callee.nonExecutableCallee();
- callTargetForCheck = callTarget;
+ if (callLinkStatus.isProved()) {
+ addToGraph(Phantom, callTarget, thisArgument);
+ return;
}
- ASSERT(calleeCell);
- addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
+ ASSERT(callLinkStatus.canOptimize());
+
+ if (JSFunction* function = callLinkStatus.function())
+ addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument);
+ else {
+ ASSERT(callLinkStatus.structure());
+ ASSERT(callLinkStatus.executable());
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
+ addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
+ }
}
-void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
+void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
{
- for (int i = 0; i < argumentCountIncludingThis; ++i)
+ for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
}
-unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
+bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
- if (verbose)
- dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
+ // First, the really simple checks: do we have an actual JS function?
+ if (!callLinkStatus.executable())
+ return false;
+ if (callLinkStatus.executable()->isHostFunction())
+ return false;
- if (m_hasDebuggerEnabled) {
- if (verbose)
- dataLog(" Failing because the debugger is in use.\n");
- return UINT_MAX;
- }
-
- FunctionExecutable* executable = callee.functionExecutable();
- if (!executable) {
- if (verbose)
- dataLog(" Failing because there is no function executable.\n");
- return UINT_MAX;
- }
+ FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
// Does the number of arguments we're passing match the arity of the target? We currently
// inline only if the number of arguments passed is greater than or equal to the number
// arguments expected.
- if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
- if (verbose)
- dataLog(" Failing because of arity mismatch.\n");
- return UINT_MAX;
- }
-
- // Do we have a code block, and does the code block's size match the heuristics/requirements for
- // being an inline candidate? We might not have a code block (1) if code was thrown away,
- // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and
- // specialization kind is construct. In the former 2 cases, we could still theoretically attempt
- // to inline it if we had a static proof of what was being called; this might happen for example
- // if you call a global function, where watchpointing gives us static information. Overall,
- // it's a rare case because we expect that any hot callees would have already been compiled.
- CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
- if (!codeBlock) {
- if (verbose)
- dataLog(" Failing because no code block available.\n");
- return UINT_MAX;
- }
- CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
- codeBlock, kind, callee.isClosureCall());
- if (verbose) {
- dataLog(" Kind: ", kind, "\n");
- dataLog(" Is closure call: ", callee.isClosureCall(), "\n");
- dataLog(" Capability level: ", capabilityLevel, "\n");
- dataLog(" Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
- dataLog(" Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
- dataLog(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
- dataLog(" Needs activation: ", codeBlock->ownerExecutable()->needsActivation(), "\n");
- dataLog(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
- }
- if (!canInline(capabilityLevel)) {
- if (verbose)
- dataLog(" Failing because the function is not inlineable.\n");
- return UINT_MAX;
- }
-
- // Check if the caller is already too large. We do this check here because that's just
- // where we happen to also have the callee's code block, and we want that for the
- // purpose of unsetting SABI.
- if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
- codeBlock->m_shouldAlwaysBeInlined = false;
- if (verbose)
- dataLog(" Failing because the caller is too large.\n");
- return UINT_MAX;
- }
-
- // FIXME: this should be better at predicting how much bloat we will introduce by inlining
- // this function.
- // https://bugs.webkit.org/show_bug.cgi?id=127627
-
- // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
- // functions have very low fidelity profiling, and presumably they weren't very hot if they
- // haven't gotten to Baseline yet. Consider not inlining these functions.
- // https://bugs.webkit.org/show_bug.cgi?id=145503
-
- // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
- // too many levels? If either of these are detected, then don't inline. We adjust our
- // heuristics if we are dealing with a function that cannot otherwise be compiled.
+ if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
+ return false;
+ // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
+ // If either of these are detected, then don't inline.
unsigned depth = 0;
- unsigned recursion = 0;
-
for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
++depth;
- if (depth >= Options::maximumInliningDepth()) {
- if (verbose)
- dataLog(" Failing because depth exceeded.\n");
- return UINT_MAX;
- }
+ if (depth >= Options::maximumInliningDepth())
+ return false; // Depth exceeded.
- if (entry->executable() == executable) {
- ++recursion;
- if (recursion >= Options::maximumInliningRecursion()) {
- if (verbose)
- dataLog(" Failing because recursion detected.\n");
- return UINT_MAX;
- }
- }
+ if (entry->executable() == executable)
+ return false; // Recursion detected.
}
- if (verbose)
- dataLog(" Inlining should be possible.\n");
-
- // It might be possible to inline.
- return codeBlock->instructionCount();
-}
-
-template<typename ChecksFunctor>
-void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
-{
- CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
+ // Do we have a code block, and does the code block's size match the heuristics/requirements for
+ // being an inline candidate? We might not have a code block if code was thrown away or if we
+ // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
+ // if we had a static proof of what was being called; this might happen for example if you call a
+ // global function, where watchpointing gives us static information. Overall, it's a rare case
+ // because we expect that any hot callees would have already been compiled.
+ CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
+ if (!codeBlock)
+ return false;
+ if (!canInlineFunctionFor(codeBlock, kind, callLinkStatus.isClosureCall()))
+ return false;
- ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
+ // Now we know without a doubt that we are committed to inlining. So begin the process
+ // by checking the callee (if necessary) and making sure that arguments and the callee
+ // are flushed.
+ emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind);
- CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
- insertChecks(codeBlock);
-
// FIXME: Don't flush constants!
int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
- ensureLocals(
- VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
- JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
+ // Make sure that we have enough locals.
+ unsigned newNumLocals = VirtualRegister(inlineCallFrameStart).toLocal() + 1 + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
+ if (newNumLocals > m_numLocals) {
+ m_numLocals = newNumLocals;
+ for (size_t i = 0; i < m_graph.numBlocks(); ++i)
+ m_graph.block(i)->ensureLocals(newNumLocals);
+ }
size_t argumentPositionStart = m_graph.m_argumentPositions.size();
- VirtualRegister resultReg(resultOperand);
- if (resultReg.isValid())
- resultReg = m_inlineStackTop->remapOperand(resultReg);
-
InlineStackEntry inlineStackEntry(
- this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
+ this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(),
+ m_inlineStackTop->remapOperand(VirtualRegister(resultOperand)),
(VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
// This is where the actual inlining really happens.
@@ -1336,12 +1323,15 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
RELEASE_ASSERT(
m_inlineStackTop->m_inlineCallFrame->isClosureCall
- == callee.isClosureCall());
- if (callee.isClosureCall()) {
+ == callLinkStatus.isClosureCall());
+ if (callLinkStatus.isClosureCall()) {
VariableAccessData* calleeVariable =
- set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
+ set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateSet)->variableAccessData();
+ VariableAccessData* scopeVariable =
+ set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateSet)->variableAccessData();
calleeVariable->mergeShouldNeverUnbox(true);
+ scopeVariable->mergeShouldNeverUnbox(true);
inlineVariableData.calleeVariable = calleeVariable;
}
@@ -1349,7 +1339,6 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
m_graph.m_inlineVariableData.append(inlineVariableData);
parseCodeBlock();
- clearCaches(); // Reset our state now that we're back to the outer code.
m_currentIndex = oldIndex;
@@ -1362,8 +1351,20 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
else
ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
- if (callerLinkability == CallerDoesNormalLinking)
- cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
+ // It's possible that the callsite block head is not owned by the caller.
+ if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
+ // It's definitely owned by the caller, because the caller created new blocks.
+ // Assert that this all adds up.
+ ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead);
+ ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
+ inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
+ } else {
+ // It's definitely not owned by the caller. Tell the caller that he does not
+ // need to link his callsite block head, because we did it for him.
+ ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
+ ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
+ inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
+ }
linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
} else
@@ -1373,10 +1374,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
// If there was a return, but no early returns, then we're done. We allow parsing of
// the caller to continue in whatever basic block we're in right now.
if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
- if (Options::verboseDFGByteCodeParsing())
- dataLog(" Allowing parsing to continue in last inlined block.\n");
-
- ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());
+ ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
// If we created new blocks then the last block needs linking, but in the
// caller. It doesn't need to be linked to, but it needs outgoing links.
@@ -1384,28 +1382,20 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
// For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
// for release builds because this block will never serve as a potential target
// in the linker's binary search.
- if (Options::verboseDFGByteCodeParsing())
- dataLog(" Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
lastBlock->bytecodeBegin = m_currentIndex;
- if (callerLinkability == CallerDoesNormalLinking) {
- if (verbose)
- dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
- m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
- }
+ m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
}
m_currentBlock = m_graph.lastBlock();
- return;
+ return true;
}
- if (Options::verboseDFGByteCodeParsing())
- dataLog(" Creating new block after inlining.\n");
-
// If we get to this point then all blocks must end in some sort of terminals.
- ASSERT(lastBlock->terminal());
+ ASSERT(lastBlock->last()->isTerminal());
+
// Need to create a new basic block for the continuation at the caller.
- RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
+ RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
// Link the early returns to the basic block we're about to create.
for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
@@ -1413,441 +1403,36 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
continue;
BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
ASSERT(!blockToLink->isLinked);
- Node* node = blockToLink->terminal();
+ Node* node = blockToLink->last();
ASSERT(node->op() == Jump);
- ASSERT(!node->targetBlock());
- node->targetBlock() = block.get();
+ ASSERT(node->takenBlock() == 0);
+ node->setTakenBlock(block.get());
inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
- if (verbose)
- dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
- blockToLink->didLink();
+#if !ASSERT_DISABLED
+ blockToLink->isLinked = true;
+#endif
}
m_currentBlock = block.get();
ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
- if (verbose)
- dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
- if (callerLinkability == CallerDoesNormalLinking) {
- m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
- m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
- }
+ m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
+ m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
m_graph.appendBlock(block);
prepareToParseBlock();
-}
-
-void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
-{
- // It's possible that the callsite block head is not owned by the caller.
- if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
- // It's definitely owned by the caller, because the caller created new blocks.
- // Assert that this all adds up.
- ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
- ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
- inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
- } else {
- // It's definitely not owned by the caller. Tell the caller that he does not
- // need to link his callsite block head, because we did it for him.
- ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
- ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
- inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
- }
-}
-
-template<typename ChecksFunctor>
-bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
-{
- CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
-
- if (!inliningBalance)
- return false;
-
- bool didInsertChecks = false;
- auto insertChecksWithAccounting = [&] () {
- insertChecks(nullptr);
- didInsertChecks = true;
- };
-
- if (verbose)
- dataLog(" Considering callee ", callee, "\n");
-
- // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
- // we currently don't have any way of getting profiling information for arguments to non-JS varargs
- // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
- // and there are no callsite value profiles and native function won't have callee value profiles for
- // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
- // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
- // calling LoadVarargs twice.
- if (!InlineCallFrame::isVarargs(kind)) {
- if (InternalFunction* function = callee.internalFunction()) {
- if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) {
- RELEASE_ASSERT(didInsertChecks);
- addToGraph(Phantom, callTargetNode);
- emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
- inliningBalance--;
- return true;
- }
- RELEASE_ASSERT(!didInsertChecks);
- return false;
- }
-
- Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
- if (intrinsic != NoIntrinsic) {
- if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
- RELEASE_ASSERT(didInsertChecks);
- addToGraph(Phantom, callTargetNode);
- emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
- inliningBalance--;
- return true;
- }
- RELEASE_ASSERT(!didInsertChecks);
- return false;
- }
- }
-
- unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
- if (myInliningCost > inliningBalance)
- return false;
-
- Instruction* savedCurrentInstruction = m_currentInstruction;
- inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
- inliningBalance -= myInliningCost;
- m_currentInstruction = savedCurrentInstruction;
- return true;
-}
-
-bool ByteCodeParser::handleInlining(
- Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
- int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
- VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
- unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
-{
- if (verbose) {
- dataLog("Handling inlining...\n");
- dataLog("Stack: ", currentCodeOrigin(), "\n");
- }
- CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
-
- if (!callLinkStatus.size()) {
- if (verbose)
- dataLog("Bailing inlining.\n");
- return false;
- }
-
- if (InlineCallFrame::isVarargs(kind)
- && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
- if (verbose)
- dataLog("Bailing inlining because of varargs.\n");
- return false;
- }
-
- unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
- if (specializationKind == CodeForConstruct)
- inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
- if (callLinkStatus.isClosureCall())
- inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
-
- // First check if we can avoid creating control flow. Our inliner does some CFG
- // simplification on the fly and this helps reduce compile times, but we can only leverage
- // this in cases where we don't need control flow diamonds to check the callee.
- if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
- int registerOffset;
-
- // Only used for varargs calls.
- unsigned mandatoryMinimum = 0;
- unsigned maxNumArguments = 0;
-
- if (InlineCallFrame::isVarargs(kind)) {
- if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
- mandatoryMinimum = functionExecutable->parameterCount();
- else
- mandatoryMinimum = 0;
-
- // includes "this"
- maxNumArguments = std::max(
- callLinkStatus.maxNumArguments(),
- mandatoryMinimum + 1);
-
- // We sort of pretend that this *is* the number of arguments that were passed.
- argumentCountIncludingThis = maxNumArguments;
-
- registerOffset = registerOffsetOrFirstFreeReg + 1;
- registerOffset -= maxNumArguments; // includes "this"
- registerOffset -= JSStack::CallFrameHeaderSize;
- registerOffset = -WTF::roundUpToMultipleOf(
- stackAlignmentRegisters(),
- -registerOffset);
- } else
- registerOffset = registerOffsetOrFirstFreeReg;
-
- bool result = attemptToInlineCall(
- callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
- argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
- inliningBalance, [&] (CodeBlock* codeBlock) {
- emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
-
- // If we have a varargs call, we want to extract the arguments right now.
- if (InlineCallFrame::isVarargs(kind)) {
- int remappedRegisterOffset =
- m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
-
- ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
-
- int argumentStart = registerOffset + JSStack::CallFrameHeaderSize;
- int remappedArgumentStart =
- m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
-
- LoadVarargsData* data = m_graph.m_loadVarargsData.add();
- data->start = VirtualRegister(remappedArgumentStart + 1);
- data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount);
- data->offset = argumentsOffset;
- data->limit = maxNumArguments;
- data->mandatoryMinimum = mandatoryMinimum;
-
- addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
-
- // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
- // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
- // callTargetNode because the other 2 are still in use and alive at this point.
- addToGraph(Phantom, callTargetNode);
-
- // In DFG IR before SSA, we cannot insert control flow between after the
- // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
- // SSA. Fortunately, we also have other reasons for not inserting control flow
- // before SSA.
-
- VariableAccessData* countVariable = newVariableAccessData(
- VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount));
- // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
- // matter very much, since our use of a SetArgument and Flushes for this local slot is
- // mostly just a formality.
- countVariable->predict(SpecInt32);
- countVariable->mergeIsProfitableToUnbox(true);
- Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
- m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
-
- set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
- for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
- VariableAccessData* variable = newVariableAccessData(
- VirtualRegister(remappedArgumentStart + argument));
- variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
-
- // For a while it had been my intention to do things like this inside the
- // prediction injection phase. But in this case it's really best to do it here,
- // because it's here that we have access to the variable access datas for the
- // inlining we're about to do.
- //
- // Something else that's interesting here is that we'd really love to get
- // predictions from the arguments loaded at the callsite, rather than the
- // arguments received inside the callee. But that probably won't matter for most
- // calls.
- if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
- ConcurrentJITLocker locker(codeBlock->m_lock);
- if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
- variable->predict(profile->computeUpdatedPrediction(locker));
- }
-
- Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
- m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
- }
- }
- });
- if (verbose) {
- dataLog("Done inlining (simple).\n");
- dataLog("Stack: ", currentCodeOrigin(), "\n");
- dataLog("Result: ", result, "\n");
- }
- return result;
- }
-
- // We need to create some kind of switch over callee. For now we only do this if we believe that
- // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
- // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
- // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
- // we could improve that aspect of this by doing polymorphic inlining but having the profiling
- // also.
- if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()
- || InlineCallFrame::isVarargs(kind)) {
- if (verbose) {
- dataLog("Bailing inlining (hard).\n");
- dataLog("Stack: ", currentCodeOrigin(), "\n");
- }
- return false;
- }
-
- unsigned oldOffset = m_currentIndex;
-
- bool allAreClosureCalls = true;
- bool allAreDirectCalls = true;
- for (unsigned i = callLinkStatus.size(); i--;) {
- if (callLinkStatus[i].isClosureCall())
- allAreDirectCalls = false;
- else
- allAreClosureCalls = false;
- }
-
- Node* thingToSwitchOn;
- if (allAreDirectCalls)
- thingToSwitchOn = callTargetNode;
- else if (allAreClosureCalls)
- thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
- else {
- // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
- // where it would be beneficial. It might be best to handle these cases as if all calls were
- // closure calls.
- // https://bugs.webkit.org/show_bug.cgi?id=136020
- if (verbose) {
- dataLog("Bailing inlining (mix).\n");
- dataLog("Stack: ", currentCodeOrigin(), "\n");
- }
- return false;
- }
-
- if (verbose) {
- dataLog("Doing hard inlining...\n");
- dataLog("Stack: ", currentCodeOrigin(), "\n");
- }
-
- int registerOffset = registerOffsetOrFirstFreeReg;
-
- // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
- // store the callee so that it will be accessible to all of the blocks we're about to create. We
- // get away with doing an immediate-set here because we wouldn't have performed any side effects
- // yet.
- if (verbose)
- dataLog("Register offset: ", registerOffset);
- VirtualRegister calleeReg(registerOffset + JSStack::Callee);
- calleeReg = m_inlineStackTop->remapOperand(calleeReg);
- if (verbose)
- dataLog("Callee is going to be ", calleeReg, "\n");
- setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
-
- SwitchData& data = *m_graph.m_switchData.add();
- data.kind = SwitchCell;
- addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
-
- BasicBlock* originBlock = m_currentBlock;
- if (verbose)
- dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
- originBlock->didLink();
- cancelLinkingForBlock(m_inlineStackTop, originBlock);
-
- // Each inlined callee will have a landing block that it returns at. They should all have jumps
- // to the continuation block, which we create last.
- Vector<BasicBlock*> landingBlocks;
-
- // We may force this true if we give up on inlining any of the edges.
- bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
-
- if (verbose)
- dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
-
- for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
- m_currentIndex = oldOffset;
- RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
- m_currentBlock = block.get();
- m_graph.appendBlock(block);
- prepareToParseBlock();
-
- Node* myCallTargetNode = getDirect(calleeReg);
-
- bool inliningResult = attemptToInlineCall(
- myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
- argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
- inliningBalance, [&] (CodeBlock*) { });
-
- if (!inliningResult) {
- // That failed so we let the block die. Nothing interesting should have been added to
- // the block. We also give up on inlining any of the (less frequent) callees.
- ASSERT(m_currentBlock == block.get());
- ASSERT(m_graph.m_blocks.last() == block);
- m_graph.killBlockAndItsContents(block.get());
- m_graph.m_blocks.removeLast();
-
- // The fact that inlining failed means we need a slow path.
- couldTakeSlowPath = true;
- break;
- }
-
- JSCell* thingToCaseOn;
- if (allAreDirectCalls)
- thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
- else {
- ASSERT(allAreClosureCalls);
- thingToCaseOn = callLinkStatus[i].executable();
- }
- data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
- m_currentIndex = nextOffset;
- processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
- addToGraph(Jump);
- if (verbose)
- dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
- m_currentBlock->didLink();
- landingBlocks.append(m_currentBlock);
-
- if (verbose)
- dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
- }
-
- RefPtr<BasicBlock> slowPathBlock = adoptRef(
- new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
- m_currentIndex = oldOffset;
- data.fallThrough = BranchTarget(slowPathBlock.get());
- m_graph.appendBlock(slowPathBlock);
- if (verbose)
- dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
- slowPathBlock->didLink();
- prepareToParseBlock();
- m_currentBlock = slowPathBlock.get();
- Node* myCallTargetNode = getDirect(calleeReg);
- if (couldTakeSlowPath) {
- addCall(
- resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
- registerOffset, prediction);
- } else {
- addToGraph(CheckBadCell);
- addToGraph(Phantom, myCallTargetNode);
- emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
-
- set(VirtualRegister(resultOperand), addToGraph(BottomValue));
- }
-
- m_currentIndex = nextOffset;
- processSetLocalQueue();
- addToGraph(Jump);
- landingBlocks.append(m_currentBlock);
-
- RefPtr<BasicBlock> continuationBlock = adoptRef(
- new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
- m_graph.appendBlock(continuationBlock);
- if (verbose)
- dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
- m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
- prepareToParseBlock();
- m_currentBlock = continuationBlock.get();
-
- for (unsigned i = landingBlocks.size(); i--;)
- landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get();
- m_currentIndex = oldOffset;
-
- if (verbose) {
- dataLog("Done inlining (hard).\n");
- dataLog("Stack: ", currentCodeOrigin(), "\n");
- }
+ // At this point we return and continue to generate code for the caller, but
+ // in the new basic block.
return true;
}
-template<typename ChecksFunctor>
-bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
+bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
{
if (argumentCountIncludingThis == 1) { // Math.min()
- insertChecks();
- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+ set(VirtualRegister(resultOperand), constantNaN());
return true;
}
if (argumentCountIncludingThis == 2) { // Math.min(x)
- insertChecks();
Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
addToGraph(Phantom, Edge(result, NumberUse));
set(VirtualRegister(resultOperand), result);
@@ -1855,7 +1440,6 @@ bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOf
}
if (argumentCountIncludingThis == 3) { // Math.min(x, y)
- insertChecks();
set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
return true;
}
@@ -1864,98 +1448,74 @@ bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOf
return false;
}
-template<typename ChecksFunctor>
-bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
+bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
{
switch (intrinsic) {
case AbsIntrinsic: {
if (argumentCountIncludingThis == 1) { // Math.abs()
- insertChecks();
- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+ set(VirtualRegister(resultOperand), constantNaN());
return true;
}
if (!MacroAssembler::supportsFloatingPointAbs())
return false;
- insertChecks();
Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- node->mergeFlags(NodeMayOverflowInDFG);
+ node->mergeFlags(NodeMayOverflow);
set(VirtualRegister(resultOperand), node);
return true;
}
case MinIntrinsic:
- return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
+ return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
case MaxIntrinsic:
- return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
-
+ return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
+
case SqrtIntrinsic:
case CosIntrinsic:
- case SinIntrinsic:
- case LogIntrinsic: {
+ case SinIntrinsic: {
if (argumentCountIncludingThis == 1) {
- insertChecks();
- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+ set(VirtualRegister(resultOperand), constantNaN());
return true;
}
switch (intrinsic) {
case SqrtIntrinsic:
- insertChecks();
+ if (!MacroAssembler::supportsFloatingPointSqrt())
+ return false;
+
set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
return true;
case CosIntrinsic:
- insertChecks();
set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
return true;
case SinIntrinsic:
- insertChecks();
set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
return true;
-
- case LogIntrinsic:
- insertChecks();
- set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset))));
- return true;
default:
RELEASE_ASSERT_NOT_REACHED();
return false;
}
}
-
- case PowIntrinsic: {
- if (argumentCountIncludingThis < 3) {
- // Math.pow() and Math.pow(x) return NaN.
- insertChecks();
- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
- return true;
- }
- insertChecks();
- VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
- VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
- set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
- return true;
- }
case ArrayPushIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
+ ArrayMode arrayMode = getArrayMode(m_currentInstruction[6].u.arrayProfile);
if (!arrayMode.isJSArray())
return false;
switch (arrayMode.type()) {
+ case Array::Undecided:
case Array::Int32:
case Array::Double:
case Array::Contiguous:
case Array::ArrayStorage: {
- insertChecks();
Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
set(VirtualRegister(resultOperand), arrayPush);
@@ -1971,7 +1531,7 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
if (argumentCountIncludingThis != 1)
return false;
- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
+ ArrayMode arrayMode = getArrayMode(m_currentInstruction[6].u.arrayProfile);
if (!arrayMode.isJSArray())
return false;
switch (arrayMode.type()) {
@@ -1979,7 +1539,6 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
case Array::Double:
case Array::Contiguous:
case Array::ArrayStorage: {
- insertChecks();
Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
set(VirtualRegister(resultOperand), arrayPop);
return true;
@@ -1994,7 +1553,6 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
if (argumentCountIncludingThis != 2)
return false;
- insertChecks();
VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
@@ -2007,7 +1565,6 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
if (argumentCountIncludingThis != 2)
return false;
- insertChecks();
VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
@@ -2015,21 +1572,10 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
set(VirtualRegister(resultOperand), charCode);
return true;
}
- case Clz32Intrinsic: {
- insertChecks();
- if (argumentCountIncludingThis == 1)
- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
- else {
- Node* operand = get(virtualRegisterForArgument(1, registerOffset));
- set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
- }
- return true;
- }
case FromCharCodeIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
- insertChecks();
VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
@@ -2042,7 +1588,6 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
if (argumentCountIncludingThis != 2)
return false;
- insertChecks();
Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
set(VirtualRegister(resultOperand), regExpExec);
@@ -2053,31 +1598,15 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
if (argumentCountIncludingThis != 2)
return false;
- insertChecks();
Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
set(VirtualRegister(resultOperand), regExpExec);
return true;
}
- case RoundIntrinsic: {
- if (argumentCountIncludingThis == 1) {
- insertChecks();
- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
- return true;
- }
- if (argumentCountIncludingThis == 2) {
- insertChecks();
- Node* operand = get(virtualRegisterForArgument(1, registerOffset));
- Node* roundNode = addToGraph(ArithRound, OpInfo(0), OpInfo(prediction), operand);
- set(VirtualRegister(resultOperand), roundNode);
- return true;
- }
- return false;
- }
+
case IMulIntrinsic: {
if (argumentCountIncludingThis != 3)
return false;
- insertChecks();
VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
Node* left = get(leftOperand);
@@ -2086,77 +1615,14 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
return true;
}
- case FRoundIntrinsic: {
- if (argumentCountIncludingThis != 2)
- return false;
- insertChecks();
- VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
- set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
- return true;
- }
-
- case DFGTrueIntrinsic: {
- insertChecks();
- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
- return true;
- }
-
- case OSRExitIntrinsic: {
- insertChecks();
- addToGraph(ForceOSRExit);
- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
- return true;
- }
-
- case IsFinalTierIntrinsic: {
- insertChecks();
- set(VirtualRegister(resultOperand),
- jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
- return true;
- }
-
- case SetInt32HeapPredictionIntrinsic: {
- insertChecks();
- for (int i = 1; i < argumentCountIncludingThis; ++i) {
- Node* node = get(virtualRegisterForArgument(i, registerOffset));
- if (node->hasHeapPrediction())
- node->setHeapPrediction(SpecInt32);
- }
- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
- return true;
- }
-
- case CheckInt32Intrinsic: {
- insertChecks();
- for (int i = 1; i < argumentCountIncludingThis; ++i) {
- Node* node = get(virtualRegisterForArgument(i, registerOffset));
- addToGraph(Phantom, Edge(node, Int32Use));
- }
- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
- return true;
- }
-
- case FiatInt52Intrinsic: {
- if (argumentCountIncludingThis != 2)
- return false;
- insertChecks();
- VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
- if (enableInt52())
- set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
- else
- set(VirtualRegister(resultOperand), get(operand));
- return true;
- }
-
default:
return false;
}
}
-template<typename ChecksFunctor>
bool ByteCodeParser::handleTypedArrayConstructor(
int resultOperand, InternalFunction* function, int registerOffset,
- int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
+ int argumentCountIncludingThis, TypedArrayType type)
{
if (!isTypedView(type))
return false;
@@ -2200,21 +1666,16 @@ bool ByteCodeParser::handleTypedArrayConstructor(
if (argumentCountIncludingThis != 2)
return false;
-
- insertChecks();
+
set(VirtualRegister(resultOperand),
addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
return true;
}
-template<typename ChecksFunctor>
bool ByteCodeParser::handleConstantInternalFunction(
int resultOperand, InternalFunction* function, int registerOffset,
- int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks)
+ int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
{
- if (verbose)
- dataLog(" Handling constant internal function ", JSValue(function), "\n");
-
// If we ever find that we have a lot of internal functions that we specialize for,
// then we should probably have some sort of hashtable dispatch, or maybe even
// dispatch straight through the MethodTable of the InternalFunction. But for now,
@@ -2222,18 +1683,18 @@ bool ByteCodeParser::handleConstantInternalFunction(
// we know about is small enough, that having just a linear cascade of if statements
// is good enough.
+ UNUSED_PARAM(prediction); // Remove this once we do more things.
+
if (function->classInfo() == ArrayConstructor::info()) {
if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
return false;
- insertChecks();
if (argumentCountIncludingThis == 2) {
set(VirtualRegister(resultOperand),
addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
return true;
}
- // FIXME: Array constructor should use "this" as newTarget.
for (int i = 1; i < argumentCountIncludingThis; ++i)
addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
set(VirtualRegister(resultOperand),
@@ -2242,14 +1703,12 @@ bool ByteCodeParser::handleConstantInternalFunction(
}
if (function->classInfo() == StringConstructor::info()) {
- insertChecks();
-
Node* result;
if (argumentCountIncludingThis <= 1)
- result = jsConstant(m_vm->smallStrings.emptyString());
+ result = cellConstant(m_vm->smallStrings.emptyString());
else
- result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
+ result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
if (kind == CodeForConstruct)
result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
@@ -2261,7 +1720,7 @@ bool ByteCodeParser::handleConstantInternalFunction(
for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
bool result = handleTypedArrayConstructor(
resultOperand, function, registerOffset, argumentCountIncludingThis,
- indexToTypedArrayType(typeIndex), insertChecks);
+ indexToTypedArrayType(typeIndex));
if (result)
return true;
}
@@ -2269,23 +1728,30 @@ bool ByteCodeParser::handleConstantInternalFunction(
return false;
}
-Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, NodeType op)
+Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
{
Node* propertyStorage;
if (isInlineOffset(offset))
propertyStorage = base;
else
propertyStorage = addToGraph(GetButterfly, base);
-
- StorageAccessData* data = m_graph.m_storageAccessData.add();
- data->offset = offset;
- data->identifierNumber = identifierNumber;
-
- Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
+ Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
return getByOffset;
}
+void ByteCodeParser::handleGetByOffset(
+ int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
+ PropertyOffset offset)
+{
+ set(VirtualRegister(destinationOperand), handleGetByOffset(prediction, base, identifierNumber, offset));
+}
+
Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
{
Node* propertyStorage;
@@ -2293,601 +1759,89 @@ Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, Propert
propertyStorage = base;
else
propertyStorage = addToGraph(GetButterfly, base);
+ Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
- StorageAccessData* data = m_graph.m_storageAccessData.add();
- data->offset = offset;
- data->identifierNumber = identifier;
-
- Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
-
- return result;
-}
-
-bool ByteCodeParser::check(const ObjectPropertyCondition& condition)
-{
- if (m_graph.watchCondition(condition))
- return true;
-
- Structure* structure = condition.object()->structure();
- if (!condition.structureEnsuresValidity(structure))
- return false;
-
- addToGraph(
- CheckStructure,
- OpInfo(m_graph.addStructureSet(structure)),
- weakJSConstant(condition.object()));
- return true;
-}
-
-GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method)
-{
- if (method.kind() == GetByOffsetMethod::LoadFromPrototype
- && method.prototype()->structure()->dfgShouldWatch()) {
- if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset()))
- return GetByOffsetMethod::constant(m_graph.freeze(constant));
- }
-
- return method;
-}
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifier;
+ m_graph.m_storageAccessData.append(storageAccessData);
-GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyCondition& condition)
-{
- if (verbose)
- dataLog("Planning a load: ", condition, "\n");
-
- // We might promote this to Equivalence, and a later DFG pass might also do such promotion
- // even if we fail, but for simplicity this cannot be asked to load an equivalence condition.
- // None of the clients of this method will request a load of an Equivalence condition anyway,
- // and supporting it would complicate the heuristics below.
- RELEASE_ASSERT(condition.kind() == PropertyCondition::Presence);
-
- // Here's the ranking of how to handle this, from most preferred to least preferred:
- //
- // 1) Watchpoint on an equivalence condition and return a constant node for the loaded value.
- // No other code is emitted, and the structure of the base object is never registered.
- // Hence this results in zero code and we won't jettison this compilation if the object
- // transitions, even if the structure is watchable right now.
- //
- // 2) Need to emit a load, and the current structure of the base is going to be watched by the
- // DFG anyway (i.e. dfgShouldWatch). Watch the structure and emit the load. Don't watch the
- // condition, since the act of turning the base into a constant in IR will cause the DFG to
- // watch the structure anyway and doing so would subsume watching the condition.
- //
- // 3) Need to emit a load, and the current structure of the base is watchable but not by the
- // DFG (i.e. transitionWatchpointSetIsStillValid() and !dfgShouldWatchIfPossible()). Watch
- // the condition, and emit a load.
- //
- // 4) Need to emit a load, and the current structure of the base is not watchable. Emit a
- // structure check, and emit a load.
- //
- // 5) The condition does not hold. Give up and return null.
-
- // First, try to promote Presence to Equivalence. We do this before doing anything else
- // because it's the most profitable. Also, there are cases where the presence is watchable but
- // we don't want to watch it unless it became an equivalence (see the relationship between
- // (1), (2), and (3) above).
- ObjectPropertyCondition equivalenceCondition = condition.attemptToMakeEquivalenceWithoutBarrier();
- if (m_graph.watchCondition(equivalenceCondition))
- return GetByOffsetMethod::constant(m_graph.freeze(equivalenceCondition.requiredValue()));
-
- // At this point, we'll have to materialize the condition's base as a constant in DFG IR. Once
- // we do this, the frozen value will have its own idea of what the structure is. Use that from
- // now on just because it's less confusing.
- FrozenValue* base = m_graph.freeze(condition.object());
- Structure* structure = base->structure();
-
- // Check if the structure that we've registered makes the condition hold. If not, just give
- // up. This is case (5) above.
- if (!condition.structureEnsuresValidity(structure))
- return GetByOffsetMethod();
-
- // If the structure is watched by the DFG already, then just use this fact to emit the load.
- // This is case (2) above.
- if (structure->dfgShouldWatch())
- return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
-
- // If we can watch the condition right now, then we can emit the load after watching it. This
- // is case (3) above.
- if (m_graph.watchCondition(condition))
- return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
-
- // We can't watch anything but we know that the current structure satisfies the condition. So,
- // check for that structure and then emit the load.
- addToGraph(
- CheckStructure,
- OpInfo(m_graph.addStructureSet(structure)),
- addToGraph(JSConstant, OpInfo(base)));
- return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
-}
-
-Node* ByteCodeParser::load(
- SpeculatedType prediction, unsigned identifierNumber, const GetByOffsetMethod& method,
- NodeType op)
-{
- switch (method.kind()) {
- case GetByOffsetMethod::Invalid:
- return nullptr;
- case GetByOffsetMethod::Constant:
- return addToGraph(JSConstant, OpInfo(method.constant()));
- case GetByOffsetMethod::LoadFromPrototype: {
- Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype()));
- return handleGetByOffset(prediction, baseNode, identifierNumber, method.offset(), op);
- }
- case GetByOffsetMethod::Load:
- // Will never see this from planLoad().
- RELEASE_ASSERT_NOT_REACHED();
- return nullptr;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return nullptr;
-}
-
-Node* ByteCodeParser::load(
- SpeculatedType prediction, const ObjectPropertyCondition& condition, NodeType op)
-{
- GetByOffsetMethod method = planLoad(condition);
- return load(prediction, m_graph.identifiers().ensure(condition.uid()), method, op);
-}
-
-bool ByteCodeParser::check(const ObjectPropertyConditionSet& conditionSet)
-{
- for (const ObjectPropertyCondition condition : conditionSet) {
- if (!check(condition))
- return false;
- }
- return true;
-}
-
-GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyConditionSet& conditionSet)
-{
- if (verbose)
- dataLog("conditionSet = ", conditionSet, "\n");
-
- GetByOffsetMethod result;
- for (const ObjectPropertyCondition condition : conditionSet) {
- switch (condition.kind()) {
- case PropertyCondition::Presence:
- RELEASE_ASSERT(!result); // Should only see exactly one of these.
- result = planLoad(condition);
- if (!result)
- return GetByOffsetMethod();
- break;
- default:
- if (!check(condition))
- return GetByOffsetMethod();
- break;
- }
- }
- RELEASE_ASSERT(!!result);
return result;
}
-Node* ByteCodeParser::load(
- SpeculatedType prediction, const ObjectPropertyConditionSet& conditionSet, NodeType op)
-{
- GetByOffsetMethod method = planLoad(conditionSet);
- return load(
- prediction,
- m_graph.identifiers().ensure(conditionSet.slotBaseCondition().uid()),
- method, op);
-}
-
-ObjectPropertyCondition ByteCodeParser::presenceLike(
- JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
-{
- if (set.isEmpty())
- return ObjectPropertyCondition();
- unsigned attributes;
- PropertyOffset firstOffset = set[0]->getConcurrently(uid, attributes);
- if (firstOffset != offset)
- return ObjectPropertyCondition();
- for (unsigned i = 1; i < set.size(); ++i) {
- unsigned otherAttributes;
- PropertyOffset otherOffset = set[i]->getConcurrently(uid, otherAttributes);
- if (otherOffset != offset || otherAttributes != attributes)
- return ObjectPropertyCondition();
- }
- return ObjectPropertyCondition::presenceWithoutBarrier(knownBase, uid, offset, attributes);
-}
-
-bool ByteCodeParser::checkPresenceLike(
- JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
-{
- return check(presenceLike(knownBase, uid, offset, set));
-}
-
-void ByteCodeParser::checkPresenceLike(
- Node* base, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
-{
- if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>()) {
- if (checkPresenceLike(knownBase, uid, offset, set))
- return;
- }
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(set)), base);
-}
-
-template<typename VariantType>
-Node* ByteCodeParser::load(
- SpeculatedType prediction, Node* base, unsigned identifierNumber, const VariantType& variant)
-{
- // Make sure backwards propagation knows that we've used base.
- addToGraph(Phantom, base);
-
- bool needStructureCheck = true;
-
- if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>()) {
- // Try to optimize away the structure check. Note that it's not worth doing anything about this
- // if the base's structure is watched.
- Structure* structure = base->constant()->structure();
- if (!structure->dfgShouldWatch()) {
- UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
-
- if (!variant.conditionSet().isEmpty()) {
- // This means that we're loading from a prototype. We expect the base not to have the
- // property. We can only use ObjectPropertyCondition if all of the structures in the
- // variant.structureSet() agree on the prototype (it would be hilariously rare if they
- // didn't). Note that we are relying on structureSet() having at least one element. That
- // will always be true here because of how GetByIdStatus/PutByIdStatus work.
- JSObject* prototype = variant.structureSet()[0]->storedPrototypeObject();
- bool allAgree = true;
- for (unsigned i = 1; i < variant.structureSet().size(); ++i) {
- if (variant.structureSet()[i]->storedPrototypeObject() != prototype) {
- allAgree = false;
- break;
- }
- }
- if (allAgree) {
- ObjectPropertyCondition condition = ObjectPropertyCondition::absenceWithoutBarrier(
- knownBase, uid, prototype);
- if (check(condition))
- needStructureCheck = false;
- }
- } else {
- // This means we're loading directly from base. We can avoid all of the code that follows
- // if we can prove that the property is a constant. Otherwise, we try to prove that the
- // property is watchably present, in which case we get rid of the structure check.
-
- ObjectPropertyCondition presenceCondition =
- presenceLike(knownBase, uid, variant.offset(), variant.structureSet());
-
- ObjectPropertyCondition equivalenceCondition =
- presenceCondition.attemptToMakeEquivalenceWithoutBarrier();
- if (m_graph.watchCondition(equivalenceCondition))
- return weakJSConstant(equivalenceCondition.requiredValue());
-
- if (check(presenceCondition))
- needStructureCheck = false;
- }
- }
- }
-
- if (needStructureCheck)
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
-
- SpeculatedType loadPrediction;
- NodeType loadOp;
- if (variant.callLinkStatus()) {
- loadPrediction = SpecCellOther;
- loadOp = GetGetterSetterByOffset;
- } else {
- loadPrediction = prediction;
- loadOp = GetByOffset;
- }
-
- Node* loadedValue;
- if (!variant.conditionSet().isEmpty())
- loadedValue = load(loadPrediction, variant.conditionSet(), loadOp);
- else {
- if (needStructureCheck && base->hasConstant()) {
- // We did emit a structure check. That means that we have an opportunity to do constant folding
- // here, since we didn't do it above.
- JSValue constant = m_graph.tryGetConstantProperty(
- base->asJSValue(), variant.structureSet(), variant.offset());
- if (constant)
- return weakJSConstant(constant);
- }
-
- loadedValue = handleGetByOffset(
- loadPrediction, base, identifierNumber, variant.offset(), loadOp);
- }
-
- return loadedValue;
-}
-
-Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVariant& variant, Node* value)
-{
- RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace);
-
- checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure());
- return handlePutByOffset(base, identifier, variant.offset(), value);
-}
-
void ByteCodeParser::handleGetById(
int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
const GetByIdStatus& getByIdStatus)
{
- NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
-
- if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) {
- set(VirtualRegister(destinationOperand),
- addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
- return;
- }
-
- if (getByIdStatus.numVariants() > 1) {
- if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
- || !Options::enablePolymorphicAccessInlining()) {
- set(VirtualRegister(destinationOperand),
- addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
- return;
- }
-
- Vector<MultiGetByOffsetCase, 2> cases;
-
- // 1) Emit prototype structure checks for all chains. This could sort of maybe not be
- // optimal, if there is some rarely executed case in the chain that requires a lot
- // of checks and those checks are not watchpointable.
- for (const GetByIdVariant& variant : getByIdStatus.variants()) {
- if (variant.conditionSet().isEmpty()) {
- cases.append(
- MultiGetByOffsetCase(
- variant.structureSet(),
- GetByOffsetMethod::load(variant.offset())));
- continue;
- }
-
- GetByOffsetMethod method = planLoad(variant.conditionSet());
- if (!method) {
- set(VirtualRegister(destinationOperand),
- addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
- return;
- }
-
- cases.append(MultiGetByOffsetCase(variant.structureSet(), method));
- }
-
- if (m_graph.compilation())
- m_graph.compilation()->noticeInlinedGetById();
-
- // 2) Emit a MultiGetByOffset
- MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
- data->cases = cases;
- data->identifierNumber = identifierNumber;
+ if (!getByIdStatus.isSimple()
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCacheWatchpoint)) {
set(VirtualRegister(destinationOperand),
- addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
+ addToGraph(
+ getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
+ OpInfo(identifierNumber), OpInfo(prediction), base));
return;
}
- ASSERT(getByIdStatus.numVariants() == 1);
- GetByIdVariant variant = getByIdStatus[0];
+ ASSERT(getByIdStatus.structureSet().size());
- Node* loadedValue = load(prediction, base, identifierNumber, variant);
- if (!loadedValue) {
- set(VirtualRegister(destinationOperand),
- addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
- return;
- }
-
if (m_graph.compilation())
m_graph.compilation()->noticeInlinedGetById();
- if (!variant.callLinkStatus()) {
- set(VirtualRegister(destinationOperand), loadedValue);
- return;
- }
-
- Node* getter = addToGraph(GetGetter, loadedValue);
-
- // Make a call. We don't try to get fancy with using the smallest operand number because
- // the stack layout phase should compress the stack anyway.
-
- unsigned numberOfParameters = 0;
- numberOfParameters++; // The 'this' argument.
- numberOfParameters++; // True return PC.
-
- // Start with a register offset that corresponds to the last in-use register.
- int registerOffset = virtualRegisterForLocal(
- m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
- registerOffset -= numberOfParameters;
- registerOffset -= JSStack::CallFrameHeaderSize;
-
- // Get the alignment right.
- registerOffset = -WTF::roundUpToMultipleOf(
- stackAlignmentRegisters(),
- -registerOffset);
-
- ensureLocals(
- m_inlineStackTop->remapOperand(
- VirtualRegister(registerOffset)).toLocal());
-
- // Issue SetLocals. This has two effects:
- // 1) That's how handleCall() sees the arguments.
- // 2) If we inline then this ensures that the arguments are flushed so that if you use
- // the dreaded arguments object on the getter, the right things happen. Well, sort of -
- // since we only really care about 'this' in this case. But we're not going to take that
- // shortcut.
- int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
- set(VirtualRegister(nextRegister++), base, ImmediateNakedSet);
-
- handleCall(
- destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
- getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
-}
-
-void ByteCodeParser::emitPutById(
- Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
-{
- if (isDirect)
- addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
- else
- addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
-}
-
-void ByteCodeParser::handlePutById(
- Node* base, unsigned identifierNumber, Node* value,
- const PutByIdStatus& putByIdStatus, bool isDirect)
-{
- if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) {
- if (!putByIdStatus.isSet())
- addToGraph(ForceOSRExit);
- emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
- return;
- }
+ Node* originalBaseForBaselineJIT = base;
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
- if (putByIdStatus.numVariants() > 1) {
- if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
- || !Options::enablePolymorphicAccessInlining()) {
- emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
- return;
+ if (getByIdStatus.chain()) {
+ m_graph.chains().addLazily(getByIdStatus.chain());
+ Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
+ JSObject* currentObject = 0;
+ for (unsigned i = 0; i < getByIdStatus.chain()->size(); ++i) {
+ currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
+ currentStructure = getByIdStatus.chain()->at(i);
+ base = cellConstantWithStructureCheck(currentObject, currentStructure);
}
-
- if (!isDirect) {
- for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
- if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
- continue;
- if (!check(putByIdStatus[variantIndex].conditionSet())) {
- emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
- return;
- }
- }
- }
-
- if (m_graph.compilation())
- m_graph.compilation()->noticeInlinedPutById();
-
- MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
- data->variants = putByIdStatus.variants();
- data->identifierNumber = identifierNumber;
- addToGraph(MultiPutByOffset, OpInfo(data), base, value);
- return;
- }
-
- ASSERT(putByIdStatus.numVariants() == 1);
- const PutByIdVariant& variant = putByIdStatus[0];
-
- switch (variant.kind()) {
- case PutByIdVariant::Replace: {
- store(base, identifierNumber, variant, value);
- if (m_graph.compilation())
- m_graph.compilation()->noticeInlinedPutById();
- return;
}
- case PutByIdVariant::Transition: {
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
- if (!check(variant.conditionSet())) {
- emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
- return;
- }
-
- ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
+ // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
+ // ensure that the base of the original get_by_id is kept alive until we're done with
+ // all of the speculations. We only insert the Phantom if there had been a CheckStructure
+ // on something other than the base following the CheckStructure on base, or if the
+ // access was compiled to a WeakJSConstant specific value, in which case we might not
+ // have any explicit use of the base at all.
+ if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
+ addToGraph(Phantom, originalBaseForBaselineJIT);
- Node* propertyStorage;
- Transition* transition = m_graph.m_transitions.add(
- variant.oldStructureForTransition(), variant.newStructure());
-
- if (variant.reallocatesStorage()) {
-
- // If we're growing the property storage then it must be because we're
- // storing into the out-of-line storage.
- ASSERT(!isInlineOffset(variant.offset()));
-
- if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
- propertyStorage = addToGraph(
- AllocatePropertyStorage, OpInfo(transition), base);
- } else {
- propertyStorage = addToGraph(
- ReallocatePropertyStorage, OpInfo(transition),
- base, addToGraph(GetButterfly, base));
- }
- } else {
- if (isInlineOffset(variant.offset()))
- propertyStorage = base;
- else
- propertyStorage = addToGraph(GetButterfly, base);
- }
-
- StorageAccessData* data = m_graph.m_storageAccessData.add();
- data->offset = variant.offset();
- data->identifierNumber = identifierNumber;
+ if (getByIdStatus.specificValue()) {
+ ASSERT(getByIdStatus.specificValue().isCell());
- addToGraph(
- PutByOffset,
- OpInfo(data),
- propertyStorage,
- base,
- value);
-
- // FIXME: PutStructure goes last until we fix either
- // https://bugs.webkit.org/show_bug.cgi?id=142921 or
- // https://bugs.webkit.org/show_bug.cgi?id=142924.
- addToGraph(PutStructure, OpInfo(transition), base);
-
- if (m_graph.compilation())
- m_graph.compilation()->noticeInlinedPutById();
+ set(VirtualRegister(destinationOperand), cellConstant(getByIdStatus.specificValue().asCell()));
return;
}
-
- case PutByIdVariant::Setter: {
- Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant);
- if (!loadedValue) {
- emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
- return;
- }
-
- Node* setter = addToGraph(GetSetter, loadedValue);
-
- // Make a call. We don't try to get fancy with using the smallest operand number because
- // the stack layout phase should compress the stack anyway.
- unsigned numberOfParameters = 0;
- numberOfParameters++; // The 'this' argument.
- numberOfParameters++; // The new value.
- numberOfParameters++; // True return PC.
-
- // Start with a register offset that corresponds to the last in-use register.
- int registerOffset = virtualRegisterForLocal(
- m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
- registerOffset -= numberOfParameters;
- registerOffset -= JSStack::CallFrameHeaderSize;
-
- // Get the alignment right.
- registerOffset = -WTF::roundUpToMultipleOf(
- stackAlignmentRegisters(),
- -registerOffset);
-
- ensureLocals(
- m_inlineStackTop->remapOperand(
- VirtualRegister(registerOffset)).toLocal());
-
- int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
- set(VirtualRegister(nextRegister++), base, ImmediateNakedSet);
- set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
-
- handleCall(
- VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
- OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
- *variant.callLinkStatus(), SpecOther);
- return;
- }
-
- default: {
- emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
- return;
- } }
+ handleGetByOffset(
+ destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset());
}
void ByteCodeParser::prepareToParseBlock()
{
- clearCaches();
- ASSERT(m_setLocalQueue.isEmpty());
+ for (unsigned i = 0; i < m_constants.size(); ++i)
+ m_constants[i] = ConstantRecord();
+ m_cellConstantNodes.clear();
}
-void ByteCodeParser::clearCaches()
+Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
{
- m_constants.resize(0);
+ Node* localBase = get(VirtualRegister(JSStack::ScopeChain));
+ if (skipTop) {
+ ASSERT(!inlineCallFrame());
+ localBase = addToGraph(SkipTopScope, localBase);
+ }
+ for (unsigned n = skipCount; n--;)
+ localBase = addToGraph(SkipScope, localBase);
+ return localBase;
}
bool ByteCodeParser::parseBlock(unsigned limit)
@@ -2905,9 +1859,10 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_graph.m_arguments.resize(m_numArguments);
for (unsigned argument = 0; argument < m_numArguments; ++argument) {
VariableAccessData* variable = newVariableAccessData(
- virtualRegisterForArgument(argument));
+ virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
variable->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
variable->mergeCheckArrayHoistingFailed(
m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
@@ -2918,7 +1873,9 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
while (true) {
- processSetLocalQueue();
+ for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
+ m_setLocalQueue[i].execute(this);
+ m_setLocalQueue.resize(0);
// Don't extend over jump destinations.
if (m_currentIndex == limit) {
@@ -2939,9 +1896,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
- if (Options::verboseDFGByteCodeParsing())
- dataLog(" parsing ", currentCodeOrigin(), "\n");
-
if (m_graph.compilation()) {
addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
@@ -2951,24 +1905,26 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// === Function entry opcodes ===
- case op_enter: {
- Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
+ case op_enter:
// Initialize all locals to undefined.
for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
- set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
+ set(virtualRegisterForLocal(i), constantUndefined(), ImmediateSet);
NEXT_OPCODE(op_enter);
- }
+
+ case op_touch_entry:
+ if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
+ addToGraph(ForceOSRExit);
+ NEXT_OPCODE(op_touch_entry);
case op_to_this: {
Node* op1 = getThis();
if (op1->op() != ToThis) {
Structure* cachedStructure = currentInstruction[2].u.structure.get();
- if (currentInstruction[2].u.toThisStatus != ToThisOK
- || !cachedStructure
+ if (!cachedStructure
|| cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
|| m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)) {
setThis(addToGraph(ToThis, op1));
} else {
addToGraph(
@@ -2983,34 +1939,18 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_create_this: {
int calleeOperand = currentInstruction[2].u.operand;
Node* callee = get(VirtualRegister(calleeOperand));
-
- JSFunction* function = callee->dynamicCastConstant<JSFunction*>();
- if (!function) {
- JSCell* cachedFunction = currentInstruction[4].u.jsCell.unvalidatedGet();
- if (cachedFunction
- && cachedFunction != JSCell::seenMultipleCalleeObjects()
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
- ASSERT(cachedFunction->inherits(JSFunction::info()));
-
- FrozenValue* frozen = m_graph.freeze(cachedFunction);
- addToGraph(CheckCell, OpInfo(frozen), callee);
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
-
- function = static_cast<JSFunction*>(cachedFunction);
- }
- }
-
bool alreadyEmitted = false;
- if (function) {
- if (FunctionRareData* rareData = function->rareData()) {
- if (Structure* structure = rareData->allocationStructure()) {
- m_graph.freeze(rareData);
- m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
- // The callee is still live up to this point.
- addToGraph(Phantom, callee);
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
- alreadyEmitted = true;
- }
+ if (callee->op() == WeakJSConstant) {
+ JSCell* cell = callee->weakConstant();
+ ASSERT(cell->inherits(JSFunction::info()));
+
+ JSFunction* function = jsCast<JSFunction*>(cell);
+ if (Structure* structure = function->allocationStructure()) {
+ addToGraph(AllocationProfileWatchpoint, OpInfo(function));
+ // The callee is still live up to this point.
+ addToGraph(Phantom, callee);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
+ alreadyEmitted = true;
}
}
if (!alreadyEmitted) {
@@ -3071,6 +2011,21 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_new_regexp);
}
+ case op_get_callee: {
+ JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
+ if (!cachedFunction
+ || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) {
+ set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
+ } else {
+ ASSERT(cachedFunction->inherits(JSFunction::info()));
+ Node* actualCallee = get(VirtualRegister(JSStack::Callee));
+ addToGraph(CheckFunction, OpInfo(cachedFunction), actualCallee);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(WeakJSConstant, OpInfo(cachedFunction)));
+ }
+ NEXT_OPCODE(op_get_callee);
+ }
+
// === Bitwise operations ===
case op_bitand: {
@@ -3130,7 +2085,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
int srcDst = currentInstruction[1].u.operand;
VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
Node* op = get(srcDstVirtualRegister);
- set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
+ set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, one())));
NEXT_OPCODE(op_inc);
}
@@ -3138,7 +2093,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
int srcDst = currentInstruction[1].u.operand;
VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
Node* op = get(srcDstVirtualRegister);
- set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
+ set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, one())));
NEXT_OPCODE(op_dec);
}
@@ -3210,10 +2165,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
set(VirtualRegister(currentInstruction[1].u.operand), op);
NEXT_OPCODE(op_mov);
}
-
- case op_check_tdz: {
- addToGraph(CheckNotEmpty, get(VirtualRegister(currentInstruction[1].u.operand)));
- NEXT_OPCODE(op_check_tdz);
+
+ case op_captured_mov: {
+ Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) {
+ if (set->state() != IsInvalidated)
+ addToGraph(NotifyWrite, OpInfo(set), op);
+ }
+ set(VirtualRegister(currentInstruction[1].u.operand), op);
+ NEXT_OPCODE(op_captured_mov);
}
case op_check_has_instance:
@@ -3257,12 +2217,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_is_object);
}
- case op_is_object_or_null: {
- Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value));
- NEXT_OPCODE(op_is_object_or_null);
- }
-
case op_is_function: {
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
@@ -3322,6 +2276,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_less: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue a = valueOfJSConstant(op1);
+ JSValue b = valueOfJSConstant(op2);
+ if (a.isNumber() && b.isNumber()) {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ getJSConstantForValue(jsBoolean(a.asNumber() < b.asNumber())));
+ NEXT_OPCODE(op_less);
+ }
+ }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
NEXT_OPCODE(op_less);
}
@@ -3329,6 +2292,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_lesseq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue a = valueOfJSConstant(op1);
+ JSValue b = valueOfJSConstant(op2);
+ if (a.isNumber() && b.isNumber()) {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ getJSConstantForValue(jsBoolean(a.asNumber() <= b.asNumber())));
+ NEXT_OPCODE(op_lesseq);
+ }
+ }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
NEXT_OPCODE(op_lesseq);
}
@@ -3336,6 +2308,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_greater: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue a = valueOfJSConstant(op1);
+ JSValue b = valueOfJSConstant(op2);
+ if (a.isNumber() && b.isNumber()) {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ getJSConstantForValue(jsBoolean(a.asNumber() > b.asNumber())));
+ NEXT_OPCODE(op_greater);
+ }
+ }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
NEXT_OPCODE(op_greater);
}
@@ -3343,6 +2324,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_greatereq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue a = valueOfJSConstant(op1);
+ JSValue b = valueOfJSConstant(op2);
+ if (a.isNumber() && b.isNumber()) {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ getJSConstantForValue(jsBoolean(a.asNumber() >= b.asNumber())));
+ NEXT_OPCODE(op_greatereq);
+ }
+ }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
NEXT_OPCODE(op_greatereq);
}
@@ -3350,41 +2340,79 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_eq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue a = valueOfJSConstant(op1);
+ JSValue b = valueOfJSConstant(op2);
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ getJSConstantForValue(jsBoolean(JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
+ NEXT_OPCODE(op_eq);
+ }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
NEXT_OPCODE(op_eq);
}
case op_eq_null: {
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, constantNull()));
NEXT_OPCODE(op_eq_null);
}
case op_stricteq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue a = valueOfJSConstant(op1);
+ JSValue b = valueOfJSConstant(op2);
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ getJSConstantForValue(jsBoolean(JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
+ NEXT_OPCODE(op_stricteq);
+ }
+ if (isConstantForCompareStrictEq(op1))
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEqConstant, op2, op1));
+ else if (isConstantForCompareStrictEq(op2))
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEqConstant, op1, op2));
+ else
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
NEXT_OPCODE(op_stricteq);
}
case op_neq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue a = valueOfJSConstant(op1);
+ JSValue b = valueOfJSConstant(op2);
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ getJSConstantForValue(jsBoolean(!JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
+ NEXT_OPCODE(op_neq);
+ }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
NEXT_OPCODE(op_neq);
}
case op_neq_null: {
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull())));
NEXT_OPCODE(op_neq_null);
}
case op_nstricteq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue a = valueOfJSConstant(op1);
+ JSValue b = valueOfJSConstant(op2);
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ getJSConstantForValue(jsBoolean(!JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
+ NEXT_OPCODE(op_nstricteq);
+ }
Node* invertedResult;
- invertedResult = addToGraph(CompareStrictEq, op1, op2);
+ if (isConstantForCompareStrictEq(op1))
+ invertedResult = addToGraph(CompareStrictEqConstant, op2, op1);
+ else if (isConstantForCompareStrictEq(op2))
+ invertedResult = addToGraph(CompareStrictEqConstant, op1, op2);
+ else
+ invertedResult = addToGraph(CompareStrictEq, op1, op2);
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
NEXT_OPCODE(op_nstricteq);
}
@@ -3392,36 +2420,13 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// === Property access operations ===
case op_get_by_val: {
- SpeculatedType prediction = getPredictionWithoutOSRExit();
-
+ SpeculatedType prediction = getPrediction();
+
Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+ ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
- bool compiledAsGetById = false;
- {
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
- ByValInfo* byValInfo = m_inlineStackTop->m_byValInfos.get(CodeOrigin(currentCodeOrigin().bytecodeIndex));
- // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
- // At that time, there is no information.
- if (byValInfo && byValInfo->stubInfo && !byValInfo->tookSlowPath && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)) {
- compiledAsGetById = true;
- unsigned identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
- UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
-
- addToGraph(CheckIdent, OpInfo(uid), property);
-
- GetByIdStatus getByIdStatus = GetByIdStatus::computeForStubInfo(
- locker, m_inlineStackTop->m_profiledBlock,
- byValInfo->stubInfo, currentCodeOrigin(), uid);
-
- handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
- }
- }
-
- if (!compiledAsGetById) {
- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
- Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
- set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
- }
+ Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
+ set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
NEXT_OPCODE(op_get_by_val);
}
@@ -3430,7 +2435,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_put_by_val: {
Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Write);
+ ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
@@ -3439,7 +2444,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
addVarArgChild(property);
addVarArgChild(value);
addVarArgChild(0); // Leave room for property storage.
- addVarArgChild(0); // Leave room for length.
addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
NEXT_OPCODE(op_put_by_val);
@@ -3453,11 +2457,10 @@ bool ByteCodeParser::parseBlock(unsigned limit)
Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+ StringImpl* uid = m_graph.identifiers()[identifierNumber];
GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
- m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
- currentCodeOrigin(), uid);
+ m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_stubInfos,
+ m_currentIndex, uid);
handleGetById(
currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
@@ -3476,63 +2479,175 @@ bool ByteCodeParser::parseBlock(unsigned limit)
bool direct = currentInstruction[8].u.operand;
PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
- m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
- currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
+ m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_stubInfos,
+ m_currentIndex, m_graph.identifiers()[identifierNumber]);
+ bool canCountAsInlined = true;
+ if (!putByIdStatus.isSet()) {
+ addToGraph(ForceOSRExit);
+ canCountAsInlined = false;
+ }
- handlePutById(base, identifierNumber, value, putByIdStatus, direct);
+ bool hasExitSite =
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCacheWatchpoint);
+
+ if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
+ handlePutByOffset(base, identifierNumber, putByIdStatus.offset(), value);
+ } else if (
+ !hasExitSite
+ && putByIdStatus.isSimpleTransition()
+ && (!putByIdStatus.structureChain()
+ || putByIdStatus.structureChain()->isStillValid())) {
+
+ m_graph.chains().addLazily(putByIdStatus.structureChain());
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
+ if (!direct) {
+ if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) {
+ cellConstantWithStructureCheck(
+ putByIdStatus.oldStructure()->storedPrototype().asCell());
+ }
+
+ for (unsigned i = 0; i < putByIdStatus.structureChain()->size(); ++i) {
+ JSValue prototype = putByIdStatus.structureChain()->at(i)->storedPrototype();
+ if (prototype.isNull())
+ continue;
+ cellConstantWithStructureCheck(prototype.asCell());
+ }
+ }
+ ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
+
+ Node* propertyStorage;
+ StructureTransitionData* transitionData =
+ m_graph.addStructureTransitionData(
+ StructureTransitionData(
+ putByIdStatus.oldStructure(),
+ putByIdStatus.newStructure()));
+
+ if (putByIdStatus.oldStructure()->outOfLineCapacity()
+ != putByIdStatus.newStructure()->outOfLineCapacity()) {
+
+ // If we're growing the property storage then it must be because we're
+ // storing into the out-of-line storage.
+ ASSERT(!isInlineOffset(putByIdStatus.offset()));
+
+ if (!putByIdStatus.oldStructure()->outOfLineCapacity()) {
+ propertyStorage = addToGraph(
+ AllocatePropertyStorage, OpInfo(transitionData), base);
+ } else {
+ propertyStorage = addToGraph(
+ ReallocatePropertyStorage, OpInfo(transitionData),
+ base, addToGraph(GetButterfly, base));
+ }
+ } else {
+ if (isInlineOffset(putByIdStatus.offset()))
+ propertyStorage = base;
+ else
+ propertyStorage = addToGraph(GetButterfly, base);
+ }
+
+ addToGraph(PutStructure, OpInfo(transitionData), base);
+
+ addToGraph(
+ PutByOffset,
+ OpInfo(m_graph.m_storageAccessData.size()),
+ propertyStorage,
+ base,
+ value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = putByIdStatus.offset();
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+ } else {
+ if (direct)
+ addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
+ else
+ addToGraph(PutById, OpInfo(identifierNumber), base, value);
+ canCountAsInlined = false;
+ }
+
+ if (canCountAsInlined && m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+
NEXT_OPCODE(op_put_by_id);
}
- case op_profile_type: {
- Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
- addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
- NEXT_OPCODE(op_profile_type);
+ case op_init_global_const_nop: {
+ NEXT_OPCODE(op_init_global_const_nop);
}
- case op_profile_control_flow: {
- BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
- addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
- NEXT_OPCODE(op_profile_control_flow);
+ case op_init_global_const: {
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ addToGraph(
+ PutGlobalVar,
+ OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
+ value);
+ NEXT_OPCODE(op_init_global_const);
}
// === Block terminators. ===
case op_jmp: {
- int relativeOffset = currentInstruction[1].u.operand;
+ unsigned relativeOffset = currentInstruction[1].u.operand;
addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- if (relativeOffset <= 0)
- flushForTerminal();
LAST_OPCODE(op_jmp);
}
case op_jtrue: {
unsigned relativeOffset = currentInstruction[2].u.operand;
Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
+ if (canFold(condition)) {
+ TriState state = valueOfJSConstant(condition).pureToBoolean();
+ if (state == TrueTriState) {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jtrue);
+ } else if (state == FalseTriState) {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jtrue);
+ }
+ }
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
LAST_OPCODE(op_jtrue);
}
case op_jfalse: {
unsigned relativeOffset = currentInstruction[2].u.operand;
Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
+ if (canFold(condition)) {
+ TriState state = valueOfJSConstant(condition).pureToBoolean();
+ if (state == FalseTriState) {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jfalse);
+ } else if (state == TrueTriState) {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jfalse);
+ }
+ }
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
LAST_OPCODE(op_jfalse);
}
case op_jeq_null: {
unsigned relativeOffset = currentInstruction[2].u.operand;
Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
- Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
+ Node* condition = addToGraph(CompareEqConstant, value, constantNull());
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
LAST_OPCODE(op_jeq_null);
}
case op_jneq_null: {
unsigned relativeOffset = currentInstruction[2].u.operand;
Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
- Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
+ Node* condition = addToGraph(CompareEqConstant, value, constantNull());
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
LAST_OPCODE(op_jneq_null);
}
@@ -3540,8 +2655,25 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue aValue = valueOfJSConstant(op1);
+ JSValue bValue = valueOfJSConstant(op2);
+ if (aValue.isNumber() && bValue.isNumber()) {
+ double a = aValue.asNumber();
+ double b = bValue.asNumber();
+ if (a < b) {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jless);
+ } else {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jless);
+ }
+ }
+ }
Node* condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
LAST_OPCODE(op_jless);
}
@@ -3549,8 +2681,25 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue aValue = valueOfJSConstant(op1);
+ JSValue bValue = valueOfJSConstant(op2);
+ if (aValue.isNumber() && bValue.isNumber()) {
+ double a = aValue.asNumber();
+ double b = bValue.asNumber();
+ if (a <= b) {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jlesseq);
+ } else {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jlesseq);
+ }
+ }
+ }
Node* condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
LAST_OPCODE(op_jlesseq);
}
@@ -3558,8 +2707,25 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue aValue = valueOfJSConstant(op1);
+ JSValue bValue = valueOfJSConstant(op2);
+ if (aValue.isNumber() && bValue.isNumber()) {
+ double a = aValue.asNumber();
+ double b = bValue.asNumber();
+ if (a > b) {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jgreater);
+ } else {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jgreater);
+ }
+ }
+ }
Node* condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
LAST_OPCODE(op_jgreater);
}
@@ -3567,8 +2733,25 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue aValue = valueOfJSConstant(op1);
+ JSValue bValue = valueOfJSConstant(op2);
+ if (aValue.isNumber() && bValue.isNumber()) {
+ double a = aValue.asNumber();
+ double b = bValue.asNumber();
+ if (a >= b) {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jgreatereq);
+ } else {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jgreatereq);
+ }
+ }
+ }
Node* condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
LAST_OPCODE(op_jgreatereq);
}
@@ -3576,8 +2759,25 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue aValue = valueOfJSConstant(op1);
+ JSValue bValue = valueOfJSConstant(op2);
+ if (aValue.isNumber() && bValue.isNumber()) {
+ double a = aValue.asNumber();
+ double b = bValue.asNumber();
+ if (a < b) {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jnless);
+ } else {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jnless);
+ }
+ }
+ }
Node* condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
LAST_OPCODE(op_jnless);
}
@@ -3585,8 +2785,25 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue aValue = valueOfJSConstant(op1);
+ JSValue bValue = valueOfJSConstant(op2);
+ if (aValue.isNumber() && bValue.isNumber()) {
+ double a = aValue.asNumber();
+ double b = bValue.asNumber();
+ if (a <= b) {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jnlesseq);
+ } else {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jnlesseq);
+ }
+ }
+ }
Node* condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
LAST_OPCODE(op_jnlesseq);
}
@@ -3594,8 +2811,25 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue aValue = valueOfJSConstant(op1);
+ JSValue bValue = valueOfJSConstant(op2);
+ if (aValue.isNumber() && bValue.isNumber()) {
+ double a = aValue.asNumber();
+ double b = bValue.asNumber();
+ if (a > b) {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jngreater);
+ } else {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jngreater);
+ }
+ }
+ }
Node* condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
LAST_OPCODE(op_jngreater);
}
@@ -3603,75 +2837,92 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (canFold(op1) && canFold(op2)) {
+ JSValue aValue = valueOfJSConstant(op1);
+ JSValue bValue = valueOfJSConstant(op2);
+ if (aValue.isNumber() && bValue.isNumber()) {
+ double a = aValue.asNumber();
+ double b = bValue.asNumber();
+ if (a >= b) {
+ // Emit a placeholder for this bytecode operation but otherwise
+ // just fall through.
+ addToGraph(Phantom);
+ NEXT_OPCODE(op_jngreatereq);
+ } else {
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jngreatereq);
+ }
+ }
+ }
Node* condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
LAST_OPCODE(op_jngreatereq);
}
case op_switch_imm: {
- SwitchData& data = *m_graph.m_switchData.add();
+ SwitchData data;
data.kind = SwitchImm;
data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
if (!table.branchOffsets[i])
continue;
unsigned target = m_currentIndex + table.branchOffsets[i];
- if (target == data.fallThrough.bytecodeIndex())
+ if (target == data.fallThroughBytecodeIndex())
continue;
- data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
+ data.cases.append(SwitchCase::withBytecodeIndex(jsNumber(static_cast<int32_t>(table.min + i)), target));
}
- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
- flushIfTerminal(data);
+ m_graph.m_switchData.append(data);
+ addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand)));
LAST_OPCODE(op_switch_imm);
}
case op_switch_char: {
- SwitchData& data = *m_graph.m_switchData.add();
+ SwitchData data;
data.kind = SwitchChar;
data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
if (!table.branchOffsets[i])
continue;
unsigned target = m_currentIndex + table.branchOffsets[i];
- if (target == data.fallThrough.bytecodeIndex())
+ if (target == data.fallThroughBytecodeIndex())
continue;
data.cases.append(
SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
}
- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
- flushIfTerminal(data);
+ m_graph.m_switchData.append(data);
+ addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand)));
LAST_OPCODE(op_switch_char);
}
case op_switch_string: {
- SwitchData& data = *m_graph.m_switchData.add();
+ SwitchData data;
data.kind = SwitchString;
data.switchTableIndex = currentInstruction[1].u.operand;
- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
StringJumpTable::StringOffsetTable::iterator iter;
StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
for (iter = table.offsetTable.begin(); iter != end; ++iter) {
unsigned target = m_currentIndex + iter->value.branchOffset;
- if (target == data.fallThrough.bytecodeIndex())
+ if (target == data.fallThroughBytecodeIndex())
continue;
data.cases.append(
SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
}
- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
- flushIfTerminal(data);
+ m_graph.m_switchData.append(data);
+ addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand)));
LAST_OPCODE(op_switch_string);
}
case op_ret:
+ flushArgumentsAndCapturedVariables();
if (inlineCallFrame()) {
- flushForReturn();
- if (m_inlineStackTop->m_returnValue.isValid())
- setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
+ ASSERT(m_inlineStackTop->m_returnValue.isValid());
+ setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSet);
m_inlineStackTop->m_didReturn = true;
if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
// If we're returning from the first block, then we're done parsing.
@@ -3693,31 +2944,28 @@ bool ByteCodeParser::parseBlock(unsigned limit)
LAST_OPCODE(op_ret);
}
addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
- flushForReturn();
LAST_OPCODE(op_ret);
case op_end:
+ flushArgumentsAndCapturedVariables();
ASSERT(!inlineCallFrame());
addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
- flushForReturn();
LAST_OPCODE(op_end);
case op_throw:
addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
- flushForTerminal();
+ flushAllArgumentsAndCapturedVariablesInInlineStack();
addToGraph(Unreachable);
LAST_OPCODE(op_throw);
case op_throw_static_error:
addToGraph(ThrowReferenceError);
- flushForTerminal();
+ flushAllArgumentsAndCapturedVariablesInInlineStack();
addToGraph(Unreachable);
LAST_OPCODE(op_throw_static_error);
case op_call:
handleCall(currentInstruction, Call, CodeForCall);
- // Verify that handleCall(), which could have inlined the callee, didn't trash m_currentInstruction
- ASSERT(m_currentInstruction == currentInstruction);
NEXT_OPCODE(op_call);
case op_construct:
@@ -3725,13 +2973,31 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_construct);
case op_call_varargs: {
- handleVarargsCall(currentInstruction, CallVarargs, CodeForCall);
- NEXT_OPCODE(op_call_varargs);
- }
+ ASSERT(inlineCallFrame());
+ ASSERT(currentInstruction[4].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
+ ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
+ // It would be cool to funnel this into handleCall() so that it can handle
+ // inlining. But currently that won't be profitable anyway, since none of the
+ // uses of call_varargs will be inlineable. So we set this up manually and
+ // without inline/intrinsic detection.
+
+ SpeculatedType prediction = getPrediction();
+
+ addToGraph(CheckArgumentsNotCreated);
+
+ unsigned argCount = inlineCallFrame()->arguments.size();
+ if (JSStack::ThisArgument + argCount > m_parameterSlots)
+ m_parameterSlots = JSStack::ThisArgument + argCount;
- case op_construct_varargs: {
- handleVarargsCall(currentInstruction, ConstructVarargs, CodeForConstruct);
- NEXT_OPCODE(op_construct_varargs);
+ addVarArgChild(get(VirtualRegister(currentInstruction[2].u.operand))); // callee
+ addVarArgChild(get(VirtualRegister(currentInstruction[3].u.operand))); // this
+ for (unsigned argument = 1; argument < argCount; ++argument)
+ addVarArgChild(get(virtualRegisterForArgument(argument)));
+
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction)));
+
+ NEXT_OPCODE(op_call_varargs);
}
case op_jneq_ptr:
@@ -3740,17 +3006,16 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// already gnarly enough as it is.
ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
addToGraph(
- CheckCell,
- OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
- m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
+ CheckFunction,
+ OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
get(VirtualRegister(currentInstruction[1].u.operand)));
addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
LAST_OPCODE(op_jneq_ptr);
case op_resolve_scope: {
int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
- unsigned depth = currentInstruction[5].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
+ unsigned depth = currentInstruction[4].u.operand;
// get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
if (needsVarInjectionChecks(resolveType))
@@ -3761,35 +3026,19 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case GlobalVar:
case GlobalPropertyWithVarInjectionChecks:
case GlobalVarWithVarInjectionChecks:
- set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
- if (resolveType == GlobalPropertyWithVarInjectionChecks || resolveType == GlobalVarWithVarInjectionChecks)
- addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand))));
+ set(VirtualRegister(dst), cellConstant(m_inlineStackTop->m_codeBlock->globalObject()));
break;
- case LocalClosureVar:
case ClosureVar:
case ClosureVarWithVarInjectionChecks: {
- Node* localBase = get(VirtualRegister(currentInstruction[2].u.operand));
- addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
-
- // We have various forms of constant folding here. This is necessary to avoid
- // spurious recompiles in dead-but-foldable code.
- if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) {
- InferredValue* singleton = symbolTable->singletonScope();
- if (JSValue value = singleton->inferredValue()) {
- m_graph.watchpoints().addLazily(singleton);
- set(VirtualRegister(dst), weakJSConstant(value));
- break;
- }
- }
- if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>()) {
- for (unsigned n = depth; n--;)
- scope = scope->next();
- set(VirtualRegister(dst), weakJSConstant(scope));
+ JSActivation* activation = currentInstruction[5].u.activation.get();
+ if (activation
+ && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) {
+ addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable()));
+ set(VirtualRegister(dst), cellConstant(activation));
break;
}
- for (unsigned n = depth; n--;)
- localBase = addToGraph(SkipScope, localBase);
- set(VirtualRegister(dst), localBase);
+ set(VirtualRegister(dst),
+ getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth));
break;
}
case Dynamic:
@@ -3803,7 +3052,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
int dst = currentInstruction[1].u.operand;
int scope = currentInstruction[2].u.operand;
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+ StringImpl* uid = m_graph.identifiers()[identifierNumber];
ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
Structure* structure = 0;
@@ -3820,118 +3069,62 @@ bool ByteCodeParser::parseBlock(unsigned limit)
UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting the fact that that's what index 5 is in GlobalVar mode.
+ SpeculatedType prediction = getPrediction();
JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
switch (resolveType) {
case GlobalProperty:
case GlobalPropertyWithVarInjectionChecks: {
- SpeculatedType prediction = getPrediction();
-
- GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
- if (status.state() != GetByIdStatus::Simple
- || status.numVariants() != 1
- || status[0].structureSet().size() != 1) {
+ GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid);
+ if (status.takesSlowPath()) {
set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
break;
}
-
- Node* base = weakJSConstant(globalObject);
- Node* result = load(prediction, base, identifierNumber, status[0]);
+ Node* base = cellConstantWithStructureCheck(globalObject, status.structureSet().singletonStructure());
addToGraph(Phantom, get(VirtualRegister(scope)));
- set(VirtualRegister(dst), result);
+ if (JSValue specificValue = status.specificValue())
+ set(VirtualRegister(dst), cellConstant(specificValue.asCell()));
+ else
+ set(VirtualRegister(dst), handleGetByOffset(prediction, base, identifierNumber, operand));
break;
}
case GlobalVar:
case GlobalVarWithVarInjectionChecks: {
addToGraph(Phantom, get(VirtualRegister(scope)));
- WatchpointSet* watchpointSet;
- ScopeOffset offset;
- {
- ConcurrentJITLocker locker(globalObject->symbolTable()->m_lock);
- SymbolTableEntry entry = globalObject->symbolTable()->get(locker, uid);
- watchpointSet = entry.watchpointSet();
- offset = entry.scopeOffset();
- }
- if (watchpointSet && watchpointSet->state() == IsWatched) {
- // This has a fun concurrency story. There is the possibility of a race in two
- // directions:
- //
- // We see that the set IsWatched, but in the meantime it gets invalidated: this is
- // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
- // invalidated, then this compilation is invalidated. Note that in the meantime we
- // may load an absurd value from the global object. It's fine to load an absurd
- // value if the compilation is invalidated anyway.
- //
- // We see that the set IsWatched, but the value isn't yet initialized: this isn't
- // possible because of the ordering of operations.
- //
- // Here's how we order operations:
- //
- // Main thread stores to the global object: always store a value first, and only
- // after that do we touch the watchpoint set. There is a fence in the touch, that
- // ensures that the store to the global object always happens before the touch on the
- // set.
- //
- // Compilation thread: always first load the state of the watchpoint set, and then
- // load the value. The WatchpointSet::state() method does fences for us to ensure
- // that the load of the state happens before our load of the value.
- //
- // Finalizing compilation: this happens on the main thread and synchronously checks
- // validity of all watchpoint sets.
- //
- // We will only perform optimizations if the load of the state yields IsWatched. That
- // means that at least one store would have happened to initialize the original value
- // of the variable (that is, the value we'd like to constant fold to). There may be
- // other stores that happen after that, but those stores will invalidate the
- // watchpoint set and also the compilation.
-
- // Note that we need to use the operand, which is a direct pointer at the global,
- // rather than looking up the global by doing variableAt(offset). That's because the
- // internal data structures of JSSegmentedVariableObject are not thread-safe even
- // though accessing the global itself is. The segmentation involves a vector spine
- // that resizes with malloc/free, so if new globals unrelated to the one we are
- // reading are added, we might access freed memory if we do variableAt().
- WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand);
-
- ASSERT(globalObject->findVariableIndex(pointer) == offset);
-
- JSValue value = pointer->get();
- if (value) {
- m_graph.watchpoints().addLazily(watchpointSet);
- set(VirtualRegister(dst), weakJSConstant(value));
- break;
- }
+ SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
+ VariableWatchpointSet* watchpointSet = entry.watchpointSet();
+ JSValue specificValue =
+ watchpointSet ? watchpointSet->inferredValue() : JSValue();
+ if (!specificValue) {
+ set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
+ break;
}
- SpeculatedType prediction = getPrediction();
- set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
+ addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
+ set(VirtualRegister(dst), inferredConstant(specificValue));
break;
}
- case LocalClosureVar:
case ClosureVar:
case ClosureVarWithVarInjectionChecks: {
Node* scopeNode = get(VirtualRegister(scope));
-
- // Ideally we wouldn't have to do this Phantom. But:
- //
- // For the constant case: we must do it because otherwise we would have no way of knowing
- // that the scope is live at OSR here.
- //
- // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
- // won't be able to handle an Undefined scope.
- addToGraph(Phantom, scopeNode);
-
- // Constant folding in the bytecode parser is important for performance. This may not
- // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
- // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
- // would recompile. But if we can fold it here, we avoid the exit.
- if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
- set(VirtualRegister(dst), weakJSConstant(value));
- break;
+ if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) {
+ SymbolTable* symbolTable = activation->symbolTable();
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
+ ASSERT(iter != symbolTable->end(locker));
+ VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
+ if (watchpointSet) {
+ if (JSValue value = watchpointSet->inferredValue()) {
+ addToGraph(Phantom, scopeNode);
+ addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
+ set(VirtualRegister(dst), inferredConstant(value));
+ break;
+ }
+ }
}
- SpeculatedType prediction = getPrediction();
set(VirtualRegister(dst),
- addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
+ addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction),
+ addToGraph(GetClosureRegisters, scopeNode)));
break;
}
case Dynamic:
@@ -3943,23 +3136,17 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_put_to_scope: {
unsigned scope = currentInstruction[1].u.operand;
- unsigned identifierNumber = currentInstruction[2].u.operand;
- if (identifierNumber != UINT_MAX)
- identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
unsigned value = currentInstruction[3].u.operand;
ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
- UniquedStringImpl* uid;
- if (identifierNumber != UINT_MAX)
- uid = m_graph.identifiers()[identifierNumber];
- else
- uid = nullptr;
-
- Structure* structure = nullptr;
- WatchpointSet* watchpoints = nullptr;
+ StringImpl* uid = m_graph.identifiers()[identifierNumber];
+
+ Structure* structure = 0;
+ VariableWatchpointSet* watchpoints = 0;
uintptr_t operand;
{
ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
- if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar)
+ if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
watchpoints = currentInstruction[5].u.watchpointSet;
else
structure = currentInstruction[5].u.structure.get();
@@ -3971,51 +3158,35 @@ bool ByteCodeParser::parseBlock(unsigned limit)
switch (resolveType) {
case GlobalProperty:
case GlobalPropertyWithVarInjectionChecks: {
- PutByIdStatus status;
- if (uid)
- status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
- else
- status = PutByIdStatus(PutByIdStatus::TakesSlowPath);
- if (status.numVariants() != 1
- || status[0].kind() != PutByIdVariant::Replace
- || status[0].structure().size() != 1) {
+ PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false);
+ if (!status.isSimpleReplace()) {
addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
break;
}
- Node* base = weakJSConstant(globalObject);
- store(base, identifierNumber, status[0], get(VirtualRegister(value)));
+ Node* base = cellConstantWithStructureCheck(globalObject, status.oldStructure());
+ addToGraph(Phantom, get(VirtualRegister(scope)));
+ handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
// Keep scope alive until after put.
addToGraph(Phantom, get(VirtualRegister(scope)));
break;
}
case GlobalVar:
case GlobalVarWithVarInjectionChecks: {
- if (watchpoints) {
- SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
- ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
- }
+ SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
+ ASSERT(watchpoints == entry.watchpointSet());
Node* valueNode = get(VirtualRegister(value));
- addToGraph(PutGlobalVar, OpInfo(operand), weakJSConstant(globalObject), valueNode);
- if (watchpoints && watchpoints->state() != IsInvalidated) {
- // Must happen after the store. See comment for GetGlobalVar.
- addToGraph(NotifyWrite, OpInfo(watchpoints));
- }
+ addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
+ if (watchpoints->state() != IsInvalidated)
+ addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
// Keep scope alive until after put.
addToGraph(Phantom, get(VirtualRegister(scope)));
break;
}
- case LocalClosureVar:
case ClosureVar:
case ClosureVarWithVarInjectionChecks: {
Node* scopeNode = get(VirtualRegister(scope));
- Node* valueNode = get(VirtualRegister(value));
-
- addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);
-
- if (watchpoints && watchpoints->state() != IsInvalidated) {
- // Must happen after the store. See comment for GetGlobalVar.
- addToGraph(NotifyWrite, OpInfo(watchpoints));
- }
+ Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
+ addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value)));
break;
}
case Dynamic:
@@ -4039,100 +3210,84 @@ bool ByteCodeParser::parseBlock(unsigned limit)
addToGraph(LoopHint);
- if (m_vm->watchdog)
+ if (m_vm->watchdog.isEnabled())
addToGraph(CheckWatchdogTimer);
NEXT_OPCODE(op_loop_hint);
}
- case op_create_lexical_environment: {
- VirtualRegister symbolTableRegister(currentInstruction[3].u.operand);
- VirtualRegister initialValueRegister(currentInstruction[4].u.operand);
- ASSERT(symbolTableRegister.isConstant() && initialValueRegister.isConstant());
- FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(symbolTableRegister.offset()));
- FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(initialValueRegister.offset()));
- Node* scope = get(VirtualRegister(currentInstruction[2].u.operand));
- Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope);
- set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment);
- NEXT_OPCODE(op_create_lexical_environment);
- }
-
- case op_get_parent_scope: {
- Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand));
- Node* newScope = addToGraph(SkipScope, currentScope);
- set(VirtualRegister(currentInstruction[1].u.operand), newScope);
- addToGraph(Phantom, currentScope);
- NEXT_OPCODE(op_get_parent_scope);
- }
-
- case op_get_scope: {
- // Help the later stages a bit by doing some small constant folding here. Note that this
- // only helps for the first basic block. It's extremely important not to constant fold
- // loads from the scope register later, as that would prevent the DFG from tracking the
- // bytecode-level liveness of the scope register.
- Node* callee = get(VirtualRegister(JSStack::Callee));
- Node* result;
- if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>())
- result = weakJSConstant(function->scope());
- else
- result = addToGraph(GetScope, callee);
- set(VirtualRegister(currentInstruction[1].u.operand), result);
- NEXT_OPCODE(op_get_scope);
+ case op_init_lazy_reg: {
+ set(VirtualRegister(currentInstruction[1].u.operand), getJSConstantForValue(JSValue()));
+ ASSERT(operandIsLocal(currentInstruction[1].u.operand));
+ m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());
+ NEXT_OPCODE(op_init_lazy_reg);
}
- case op_create_direct_arguments: {
- noticeArgumentsUse();
- Node* createArguments = addToGraph(CreateDirectArguments);
- set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
- NEXT_OPCODE(op_create_direct_arguments);
+ case op_create_activation: {
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand))));
+ NEXT_OPCODE(op_create_activation);
}
- case op_create_scoped_arguments: {
- noticeArgumentsUse();
- Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand)));
+ case op_create_arguments: {
+ m_graph.m_hasArguments = true;
+ Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand)));
set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
- NEXT_OPCODE(op_create_scoped_arguments);
+ set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments);
+ NEXT_OPCODE(op_create_arguments);
+ }
+
+ case op_tear_off_activation: {
+ addToGraph(TearOffActivation, get(VirtualRegister(currentInstruction[1].u.operand)));
+ NEXT_OPCODE(op_tear_off_activation);
}
- case op_create_out_of_band_arguments: {
- noticeArgumentsUse();
- Node* createArguments = addToGraph(CreateClonedArguments);
- set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
- NEXT_OPCODE(op_create_out_of_band_arguments);
+ case op_tear_off_arguments: {
+ m_graph.m_hasArguments = true;
+ addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand))), get(VirtualRegister(currentInstruction[2].u.operand)));
+ NEXT_OPCODE(op_tear_off_arguments);
}
- case op_get_from_arguments: {
- set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(
- GetFromArguments,
- OpInfo(currentInstruction[3].u.operand),
- OpInfo(getPrediction()),
- get(VirtualRegister(currentInstruction[2].u.operand))));
- NEXT_OPCODE(op_get_from_arguments);
+ case op_get_arguments_length: {
+ m_graph.m_hasArguments = true;
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe));
+ NEXT_OPCODE(op_get_arguments_length);
}
- case op_put_to_arguments: {
- addToGraph(
- PutToArguments,
- OpInfo(currentInstruction[2].u.operand),
- get(VirtualRegister(currentInstruction[1].u.operand)),
- get(VirtualRegister(currentInstruction[3].u.operand)));
- NEXT_OPCODE(op_put_to_arguments);
+ case op_get_argument_by_val: {
+ m_graph.m_hasArguments = true;
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(
+ GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
+ get(VirtualRegister(currentInstruction[3].u.operand))));
+ NEXT_OPCODE(op_get_argument_by_val);
}
case op_new_func: {
- FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand);
- FrozenValue* frozen = m_graph.freezeStrong(decl);
- set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(NewFunction, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
+ if (!currentInstruction[3].u.operand) {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
+ } else {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(
+ NewFunction,
+ OpInfo(currentInstruction[2].u.operand),
+ get(VirtualRegister(currentInstruction[1].u.operand))));
+ }
NEXT_OPCODE(op_new_func);
}
-
+
+ case op_new_captured_func: {
+ Node* function = addToGraph(
+ NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand));
+ if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet)
+ addToGraph(NotifyWrite, OpInfo(set), function);
+ set(VirtualRegister(currentInstruction[1].u.operand), function);
+ NEXT_OPCODE(op_new_captured_func);
+ }
+
case op_new_func_exp: {
- FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand);
- FrozenValue* frozen = m_graph.freezeStrong(expr);
set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(NewFunction, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
+ addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
NEXT_OPCODE(op_new_func_exp);
}
@@ -4143,98 +3298,17 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
case op_to_number: {
- Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
- addToGraph(Phantom, Edge(node, NumberUse));
- set(VirtualRegister(currentInstruction[1].u.operand), node);
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(Identity, Edge(get(VirtualRegister(currentInstruction[2].u.operand)), NumberUse)));
NEXT_OPCODE(op_to_number);
}
-
- case op_to_string: {
- Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value));
- NEXT_OPCODE(op_to_string);
- }
-
+
case op_in: {
set(VirtualRegister(currentInstruction[1].u.operand),
addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
NEXT_OPCODE(op_in);
}
- case op_get_enumerable_length: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength,
- get(VirtualRegister(currentInstruction[2].u.operand))));
- NEXT_OPCODE(op_get_enumerable_length);
- }
-
- case op_has_generic_property: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty,
- get(VirtualRegister(currentInstruction[2].u.operand)),
- get(VirtualRegister(currentInstruction[3].u.operand))));
- NEXT_OPCODE(op_has_generic_property);
- }
-
- case op_has_structure_property: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty,
- get(VirtualRegister(currentInstruction[2].u.operand)),
- get(VirtualRegister(currentInstruction[3].u.operand)),
- get(VirtualRegister(currentInstruction[4].u.operand))));
- NEXT_OPCODE(op_has_structure_property);
- }
-
- case op_has_indexed_property: {
- Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
- Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
- Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), base, property);
- set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
- NEXT_OPCODE(op_has_indexed_property);
- }
-
- case op_get_direct_pname: {
- SpeculatedType prediction = getPredictionWithoutOSRExit();
-
- Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
- Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
- Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
- Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));
-
- addVarArgChild(base);
- addVarArgChild(property);
- addVarArgChild(index);
- addVarArgChild(enumerator);
- set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
-
- NEXT_OPCODE(op_get_direct_pname);
- }
-
- case op_get_property_enumerator: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator,
- get(VirtualRegister(currentInstruction[2].u.operand))));
- NEXT_OPCODE(op_get_property_enumerator);
- }
-
- case op_enumerator_structure_pname: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname,
- get(VirtualRegister(currentInstruction[2].u.operand)),
- get(VirtualRegister(currentInstruction[3].u.operand))));
- NEXT_OPCODE(op_enumerator_structure_pname);
- }
-
- case op_enumerator_generic_pname: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname,
- get(VirtualRegister(currentInstruction[2].u.operand)),
- get(VirtualRegister(currentInstruction[3].u.operand))));
- NEXT_OPCODE(op_enumerator_generic_pname);
- }
-
- case op_to_index_string: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString,
- get(VirtualRegister(currentInstruction[2].u.operand))));
- NEXT_OPCODE(op_to_index_string);
- }
-
default:
// Parse failed! This should not happen because the capabilities checker
// should have caught it.
@@ -4248,52 +3322,62 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleT
{
ASSERT(!block->isLinked);
ASSERT(!block->isEmpty());
- Node* node = block->terminal();
+ Node* node = block->last();
ASSERT(node->isTerminal());
switch (node->op()) {
case Jump:
- node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
+ node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
break;
- case Branch: {
- BranchData* data = node->branchData();
- data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
- data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
+ case Branch:
+ node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
+ node->setNotTakenBlock(blockForBytecodeOffset(possibleTargets, node->notTakenBytecodeOffsetDuringParsing()));
break;
- }
- case Switch: {
- SwitchData* data = node->switchData();
+ case Switch:
for (unsigned i = node->switchData()->cases.size(); i--;)
- data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
- data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
+ node->switchData()->cases[i].target = blockForBytecodeOffset(possibleTargets, node->switchData()->cases[i].targetBytecodeIndex());
+ node->switchData()->fallThrough = blockForBytecodeOffset(possibleTargets, node->switchData()->fallThroughBytecodeIndex());
break;
- }
default:
break;
}
- if (verbose)
- dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n");
- block->didLink();
+#if !ASSERT_DISABLED
+ block->isLinked = true;
+#endif
}
void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
{
for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
- if (verbose)
- dataLog("Attempting to link ", RawPointer(unlinkedBlocks[i].m_block), "\n");
if (unlinkedBlocks[i].m_needsNormalLinking) {
- if (verbose)
- dataLog(" Does need normal linking.\n");
linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
unlinkedBlocks[i].m_needsNormalLinking = false;
}
}
}
+void ByteCodeParser::buildOperandMapsIfNecessary()
+{
+ if (m_haveBuiltOperandMaps)
+ return;
+
+ for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
+ m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
+ for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
+ JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
+ if (!value)
+ m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
+ else
+ m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
+ }
+
+ m_haveBuiltOperandMaps = true;
+}
+
ByteCodeParser::InlineStackEntry::InlineStackEntry(
ByteCodeParser* byteCodeParser,
CodeBlock* codeBlock,
@@ -4303,7 +3387,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
VirtualRegister returnValueVR,
VirtualRegister inlineCallFrameStart,
int argumentCountIncludingThis,
- InlineCallFrame::Kind kind)
+ CodeSpecializationKind kind)
: m_byteCodeParser(byteCodeParser)
, m_codeBlock(codeBlock)
, m_profiledBlock(profiledBlock)
@@ -4321,11 +3405,8 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
// We do this while holding the lock because we want to encourage StructureStubInfo's
// to be potentially added to operations and because the profiled block could be in the
// middle of LLInt->JIT tier-up in which case we would be adding the info's right now.
- if (m_profiledBlock->hasBaselineJITProfiling()) {
+ if (m_profiledBlock->hasBaselineJITProfiling())
m_profiledBlock->getStubInfoMap(locker, m_stubInfos);
- m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos);
- m_profiledBlock->getByValInfoMap(locker, m_byValInfos);
- }
}
m_argumentPositions.resize(argumentCountIncludingThis);
@@ -4335,35 +3416,87 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_argumentPositions[i] = argumentPosition;
}
+ // Track the code-block-global exit sites.
+ if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
+ byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
+ codeBlock->ownerExecutable());
+ }
+
if (m_caller) {
// Inline case.
ASSERT(codeBlock != byteCodeParser->m_codeBlock);
ASSERT(inlineCallFrameStart.isValid());
ASSERT(callsiteBlockHead);
- m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
- byteCodeParser->m_graph.freeze(codeBlock->ownerExecutable());
- // The owner is the machine code block, and we already have a barrier on that when the
- // plan finishes.
- m_inlineCallFrame->executable.setWithoutWriteBarrier(codeBlock->ownerExecutable());
- m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize);
+ m_inlineCallFrame = byteCodeParser->m_graph.m_inlineCallFrames->add();
+ initializeLazyWriteBarrierForInlineCallFrameExecutable(
+ byteCodeParser->m_graph.m_plan.writeBarriers,
+ m_inlineCallFrame->executable,
+ byteCodeParser->m_codeBlock,
+ m_inlineCallFrame,
+ byteCodeParser->m_codeBlock->ownerExecutable(),
+ codeBlock->ownerExecutable());
+ m_inlineCallFrame->stackOffset = inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize;
if (callee) {
m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
m_inlineCallFrame->isClosureCall = false;
} else
m_inlineCallFrame->isClosureCall = true;
m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin();
- m_inlineCallFrame->arguments.resizeToFit(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
- m_inlineCallFrame->kind = kind;
+ m_inlineCallFrame->arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
+ m_inlineCallFrame->isCall = isCall(kind);
+
+ if (m_inlineCallFrame->caller.inlineCallFrame)
+ m_inlineCallFrame->capturedVars = m_inlineCallFrame->caller.inlineCallFrame->capturedVars;
+ else {
+ for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) {
+ if (byteCodeParser->m_codeBlock->isCaptured(virtualRegisterForLocal(i)))
+ m_inlineCallFrame->capturedVars.set(i);
+ }
+ }
+
+ for (int i = argumentCountIncludingThis; i--;) {
+ VirtualRegister argument = virtualRegisterForArgument(i);
+ if (codeBlock->isCaptured(argument))
+ m_inlineCallFrame->capturedVars.set(VirtualRegister(argument.offset() + m_inlineCallFrame->stackOffset).toLocal());
+ }
+ for (size_t i = codeBlock->m_numVars; i--;) {
+ VirtualRegister local = virtualRegisterForLocal(i);
+ if (codeBlock->isCaptured(local))
+ m_inlineCallFrame->capturedVars.set(VirtualRegister(local.offset() + m_inlineCallFrame->stackOffset).toLocal());
+ }
+
+ byteCodeParser->buildOperandMapsIfNecessary();
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
+ m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
- UniquedStringImpl* rep = codeBlock->identifier(i).impl();
- unsigned index = byteCodeParser->m_graph.identifiers().ensure(rep);
- m_identifierRemap[i] = index;
+ StringImpl* rep = codeBlock->identifier(i).impl();
+ BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers());
+ if (result.isNewEntry)
+ byteCodeParser->m_graph.identifiers().addLazily(rep);
+ m_identifierRemap[i] = result.iterator->value;
+ }
+ for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
+ JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
+ if (!value) {
+ if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
+ byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
+ byteCodeParser->addConstant(JSValue());
+ byteCodeParser->m_constants.append(ConstantRecord());
+ }
+ m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
+ continue;
+ }
+ JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
+ if (result.isNewEntry) {
+ byteCodeParser->addConstant(value);
+ byteCodeParser->m_constants.append(ConstantRecord());
+ }
+ m_constantRemap[i] = result.iterator->value;
}
for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
// If we inline the same code block multiple times, we don't want to needlessly
@@ -4395,10 +3528,13 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_inlineCallFrame = 0;
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
+ m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
m_identifierRemap[i] = i;
+ for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
+ m_constantRemap[i] = i + FirstConstantRegisterIndex;
for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
m_constantBufferRemap[i] = i;
for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
@@ -4406,13 +3542,14 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_callsiteBlockHeadNeedsLinking = false;
}
+ for (size_t i = 0; i < m_constantRemap.size(); ++i)
+ ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
+
byteCodeParser->m_inlineStackTop = this;
}
void ByteCodeParser::parseCodeBlock()
{
- clearCaches();
-
CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
if (m_graph.compilation()) {
@@ -4420,16 +3557,8 @@ void ByteCodeParser::parseCodeBlock()
*m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
}
- if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
- Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump();
- if (inlineCallFrame()) {
- DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->caller);
- deferredSourceDump.append(dump);
- } else
- deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
- }
-
- if (Options::dumpBytecodeAtDFGTime()) {
+ bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
+ if (shouldDumpBytecode) {
dataLog("Parsing ", *codeBlock);
if (inlineCallFrame()) {
dataLog(
@@ -4437,7 +3566,9 @@ void ByteCodeParser::parseCodeBlock()
" ", inlineCallFrame()->caller);
}
dataLog(
- ": needsActivation = ", codeBlock->needsActivation(),
+ ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0,
+ ", needsFullScopeChain = ", codeBlock->needsFullScopeChain(),
+ ", needsActivation = ", codeBlock->ownerExecutable()->needsActivation(),
", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
codeBlock->baselineVersion()->dumpBytecode();
}
@@ -4476,7 +3607,7 @@ void ByteCodeParser::parseCodeBlock()
m_currentBlock = m_graph.lastBlock();
m_currentBlock->bytecodeBegin = m_currentIndex;
} else {
- RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, PNaN));
+ RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
m_currentBlock = block.get();
// This assertion checks two things:
// 1) If the bytecodeBegin is greater than currentIndex, then something has gone
@@ -4484,12 +3615,7 @@ void ByteCodeParser::parseCodeBlock()
// 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
// a peephole coalescing of this block in the if statement above. So, we're
// generating suboptimal code and leaving more work for the CFG simplifier.
- if (!m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
- unsigned lastBegin =
- m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin;
- ASSERT_UNUSED(
- lastBegin, lastBegin == UINT_MAX || lastBegin < m_currentIndex);
- }
+ ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex);
m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
m_inlineStackTop->m_blockLinkingTargets.append(block.get());
// The first block is definitely an OSR target.
@@ -4510,13 +3636,10 @@ void ByteCodeParser::parseCodeBlock()
// are at the end of an inline function, or we realized that we
// should stop parsing because there was a return in the first
// basic block.
- ASSERT(m_currentBlock->isEmpty() || m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);
+ ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);
- if (!shouldContinueParsing) {
- if (Options::verboseDFGByteCodeParsing())
- dataLog("Done parsing ", *codeBlock, "\n");
+ if (!shouldContinueParsing)
return;
- }
m_currentBlock = 0;
} while (m_currentIndex < limit);
@@ -4524,9 +3647,6 @@ void ByteCodeParser::parseCodeBlock()
// Should have reached the end of the instructions.
ASSERT(m_currentIndex == codeBlock->instructions().size());
-
- if (Options::verboseDFGByteCodeParsing())
- dataLog("Done parsing ", *codeBlock, " (fell off end)\n");
}
bool ByteCodeParser::parse()
@@ -4534,21 +3654,25 @@ bool ByteCodeParser::parse()
// Set during construction.
ASSERT(!m_currentIndex);
- if (Options::verboseDFGByteCodeParsing())
- dataLog("Parsing ", *m_codeBlock, "\n");
-
- m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock.get();
- if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock
- && Options::enablePolyvariantDevirtualization()) {
- if (Options::enablePolyvariantCallInlining())
- CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap);
- if (Options::enablePolyvariantByIdInlining())
- m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos);
+ if (m_codeBlock->captureCount()) {
+ SymbolTable* symbolTable = m_codeBlock->symbolTable();
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ SymbolTable::Map::iterator iter = symbolTable->begin(locker);
+ SymbolTable::Map::iterator end = symbolTable->end(locker);
+ for (; iter != end; ++iter) {
+ VariableWatchpointSet* set = iter->value.watchpointSet();
+ if (!set)
+ continue;
+ size_t index = static_cast<size_t>(VirtualRegister(iter->value.getIndex()).toLocal());
+ while (m_localWatchpoints.size() <= index)
+ m_localWatchpoints.append(nullptr);
+ m_localWatchpoints[index] = set;
+ }
}
InlineStackEntry inlineStackEntry(
this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(),
- m_codeBlock->numParameters(), InlineCallFrame::Call);
+ m_codeBlock->numParameters(), CodeForCall);
parseCodeBlock();
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
index bd6888d70..cb8626998 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
@@ -28,10 +28,17 @@
#if ENABLE(DFG_JIT)
-namespace JSC { namespace DFG {
+#include "DFGGraph.h"
-class Graph;
+namespace JSC {
+class CodeBlock;
+class VM;
+
+namespace DFG {
+
+// Populate the Graph with a basic block of code from the CodeBlock,
+// starting at the provided bytecode index.
bool parse(Graph&);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
index 6bf4759ce..d149fc692 100644
--- a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,7 +34,7 @@
#include "DFGPhase.h"
#include "DFGSafeToExecute.h"
#include "OperandsInlines.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -79,59 +79,6 @@ public:
performForwardCFA();
} while (m_changed);
- if (m_graph.m_form != SSA) {
- ASSERT(!m_changed);
-
- // Widen the abstract values at the block that serves as the must-handle OSR entry.
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
-
- if (!block->isOSRTarget)
- continue;
- if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
- continue;
-
- bool changed = false;
- for (size_t i = m_graph.m_plan.mustHandleValues.size(); i--;) {
- int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
- JSValue value = m_graph.m_plan.mustHandleValues[i];
- Node* node = block->variablesAtHead.operand(operand);
- if (!node)
- continue;
-
- AbstractValue& target = block->valuesAtHead.operand(operand);
- changed |= target.mergeOSREntryValue(m_graph, value);
- target.fixTypeForRepresentation(
- m_graph, resultFor(node->variableAccessData()->flushFormat()));
- }
-
- if (changed || !block->cfaHasVisited) {
- m_changed = true;
- block->cfaShouldRevisit = true;
- }
- }
-
- // Propagate any of the changes we just introduced.
- while (m_changed) {
- m_changed = false;
- performForwardCFA();
- }
-
- // Make sure we record the intersection of all proofs that we ever allowed the
- // compiler to rely upon.
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
-
- block->intersectionOfCFAHasVisited &= block->cfaHasVisited;
- for (unsigned i = block->intersectionOfPastValuesAtHead.size(); i--;)
- block->intersectionOfPastValuesAtHead[i].filter(block->valuesAtHead[i]);
- }
- }
-
return true;
}
@@ -145,11 +92,8 @@ private:
if (m_verbose)
dataLog(" Block ", *block, ":\n");
m_state.beginBasicBlock(block);
- if (m_verbose) {
+ if (m_verbose)
dataLog(" head vars: ", block->valuesAtHead, "\n");
- if (m_graph.m_form == SSA)
- dataLog(" head regs: ", mapDump(block->ssa->valuesAtHead), "\n");
- }
for (unsigned i = 0; i < block->size(); ++i) {
if (m_verbose) {
Node* node = block->at(i);
@@ -158,8 +102,10 @@ private:
if (!safeToExecute(m_state, m_graph, node))
dataLog("(UNSAFE) ");
- dataLog(m_state.variables(), " ", m_interpreter);
+ m_interpreter.dump(WTF::dataFile());
+ if (m_state.haveStructures())
+ dataLog(" (Have Structures)");
dataLogF("\n");
}
if (!m_interpreter.execute(i)) {
@@ -175,11 +121,8 @@ private:
}
m_changed |= m_state.endBasicBlock(MergeToSuccessors);
- if (m_verbose) {
+ if (m_verbose)
dataLog(" tail vars: ", block->valuesAtTail, "\n");
- if (m_graph.m_form == SSA)
- dataLog(" head regs: ", mapDump(block->ssa->valuesAtTail), "\n");
- }
}
void performForwardCFA()
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.h b/Source/JavaScriptCore/dfg/DFGCFAPhase.h
index 30a69c4c6..cc9e6c4b4 100644
--- a/Source/JavaScriptCore/dfg/DFGCFAPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGCFAPhase_h
#define DFGCFAPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp
index 34a133624..5de36a0da 100644
--- a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,7 +33,7 @@
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGValidate.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -59,7 +59,7 @@ public:
continue;
ASSERT(block->isReachable);
- switch (block->terminal()->op()) {
+ switch (block->last()->op()) {
case Jump: {
// Successor with one predecessor -> merge.
if (block->successor(0)->predecessors.size() == 1) {
@@ -79,8 +79,6 @@ public:
// suboptimal, because if my successor has multiple predecessors then we'll
// be keeping alive things on other predecessor edges unnecessarily.
// What we really need is the notion of end-of-block ghosties!
- // FIXME: Allow putting phantoms after terminals.
- // https://bugs.webkit.org/show_bug.cgi?id=126778
break;
}
@@ -99,19 +97,17 @@ public:
if (extremeLogging)
m_graph.dump();
m_graph.dethread();
-
- Node* terminal = block->terminal();
- ASSERT(terminal->isTerminal());
- NodeOrigin boundaryNodeOrigin = terminal->origin;
-
- jettisonBlock(block, jettisonedBlock, boundaryNodeOrigin);
-
- block->replaceTerminal(
- m_graph, SpecNone, Jump, boundaryNodeOrigin,
- OpInfo(targetBlock));
-
- ASSERT(block->terminal());
+ ASSERT(block->last()->isTerminal());
+ CodeOrigin boundaryCodeOrigin = block->last()->codeOrigin;
+ block->last()->convertToPhantom();
+ ASSERT(block->last()->refCount() == 1);
+
+ jettisonBlock(block, jettisonedBlock, boundaryCodeOrigin);
+
+ block->appendNode(
+ m_graph, SpecNone, Jump, boundaryCodeOrigin,
+ OpInfo(targetBlock));
}
innerChanged = outerChanged = true;
break;
@@ -131,47 +127,44 @@ public:
}
case Switch: {
- SwitchData* data = block->terminal()->switchData();
+ SwitchData* data = block->last()->switchData();
// Prune out cases that end up jumping to default.
for (unsigned i = 0; i < data->cases.size(); ++i) {
- if (data->cases[i].target.block == data->fallThrough.block) {
- data->fallThrough.count += data->cases[i].target.count;
- data->cases[i--] = data->cases.last();
- data->cases.removeLast();
- }
+ if (data->cases[i].target == data->fallThrough)
+ data->cases[i--] = data->cases.takeLast();
}
// If there are no cases other than default then this turns
// into a jump.
if (data->cases.isEmpty()) {
- convertToJump(block, data->fallThrough.block);
+ convertToJump(block, data->fallThrough);
innerChanged = outerChanged = true;
break;
}
// Switch on constant -> jettison all other targets and merge.
- Node* terminal = block->terminal();
- if (terminal->child1()->hasConstant()) {
- FrozenValue* value = terminal->child1()->constant();
+ if (block->last()->child1()->hasConstant()) {
+ JSValue value = m_graph.valueOfJSConstant(block->last()->child1().node());
TriState found = FalseTriState;
BasicBlock* targetBlock = 0;
for (unsigned i = data->cases.size(); found == FalseTriState && i--;) {
found = data->cases[i].value.strictEqual(value);
if (found == TrueTriState)
- targetBlock = data->cases[i].target.block;
+ targetBlock = data->cases[i].target;
}
if (found == MixedTriState)
break;
if (found == FalseTriState)
- targetBlock = data->fallThrough.block;
+ targetBlock = data->fallThrough;
ASSERT(targetBlock);
Vector<BasicBlock*, 1> jettisonedBlocks;
- for (BasicBlock* successor : terminal->successors()) {
- if (successor != targetBlock)
- jettisonedBlocks.append(successor);
+ for (unsigned i = block->numSuccessors(); i--;) {
+ BasicBlock* jettisonedBlock = block->successor(i);
+ if (jettisonedBlock != targetBlock)
+ jettisonedBlocks.append(jettisonedBlock);
}
if (targetBlock->predecessors.size() == 1) {
@@ -185,13 +178,12 @@ public:
m_graph.dump();
m_graph.dethread();
- NodeOrigin boundaryNodeOrigin = terminal->origin;
-
+ CodeOrigin boundaryCodeOrigin = block->last()->codeOrigin;
+ block->last()->convertToPhantom();
for (unsigned i = jettisonedBlocks.size(); i--;)
- jettisonBlock(block, jettisonedBlocks[i], boundaryNodeOrigin);
-
- block->replaceTerminal(
- m_graph, SpecNone, Jump, boundaryNodeOrigin, OpInfo(targetBlock));
+ jettisonBlock(block, jettisonedBlocks[i], boundaryCodeOrigin);
+ block->appendNode(
+ m_graph, SpecNone, Jump, boundaryCodeOrigin, OpInfo(targetBlock));
}
innerChanged = outerChanged = true;
break;
@@ -256,40 +248,36 @@ private:
m_graph.dethread();
mergeBlocks(block, targetBlock, noBlocks());
} else {
- Node* branch = block->terminal();
+ Node* branch = block->last();
+ ASSERT(branch->isTerminal());
ASSERT(branch->op() == Branch || branch->op() == Switch);
-
- block->replaceTerminal(
- m_graph, SpecNone, Jump, branch->origin, OpInfo(targetBlock));
+ branch->convertToPhantom();
+ ASSERT(branch->refCount() == 1);
+
+ block->appendNode(
+ m_graph, SpecNone, Jump, branch->codeOrigin,
+ OpInfo(targetBlock));
}
}
-
- void keepOperandAlive(BasicBlock* block, BasicBlock* jettisonedBlock, NodeOrigin nodeOrigin, VirtualRegister operand)
+
+ void keepOperandAlive(BasicBlock* block, BasicBlock* jettisonedBlock, CodeOrigin codeOrigin, VirtualRegister operand)
{
Node* livenessNode = jettisonedBlock->variablesAtHead.operand(operand);
if (!livenessNode)
return;
- NodeType nodeType;
- if (livenessNode->flags() & NodeIsFlushed)
- nodeType = Flush;
- else {
- // This seems like it shouldn't be necessary because we could just rematerialize
- // PhantomLocals or something similar using bytecode liveness. However, in ThreadedCPS, it's
- // worth the sanity to maintain this eagerly. See
- // https://bugs.webkit.org/show_bug.cgi?id=144086
- nodeType = PhantomLocal;
- }
+ if (livenessNode->variableAccessData()->isCaptured())
+ return;
block->appendNode(
- m_graph, SpecNone, nodeType, nodeOrigin,
+ m_graph, SpecNone, PhantomLocal, codeOrigin,
OpInfo(livenessNode->variableAccessData()));
}
- void jettisonBlock(BasicBlock* block, BasicBlock* jettisonedBlock, NodeOrigin boundaryNodeOrigin)
+ void jettisonBlock(BasicBlock* block, BasicBlock* jettisonedBlock, CodeOrigin boundaryCodeOrigin)
{
for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i)
- keepOperandAlive(block, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForArgument(i));
+ keepOperandAlive(block, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForArgument(i));
for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i)
- keepOperandAlive(block, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForLocal(i));
+ keepOperandAlive(block, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForLocal(i));
fixJettisonedPredecessors(block, jettisonedBlock);
}
@@ -322,12 +310,11 @@ private:
// kept alive.
// Remove the terminal of firstBlock since we don't need it anymore. Well, we don't
- // really remove it; we actually turn it into a check.
- Node* terminal = firstBlock->terminal();
- ASSERT(terminal->isTerminal());
- NodeOrigin boundaryNodeOrigin = terminal->origin;
- terminal->remove();
- ASSERT(terminal->refCount() == 1);
+ // really remove it; we actually turn it into a Phantom.
+ ASSERT(firstBlock->last()->isTerminal());
+ CodeOrigin boundaryCodeOrigin = firstBlock->last()->codeOrigin;
+ firstBlock->last()->convertToPhantom();
+ ASSERT(firstBlock->last()->refCount() == 1);
for (unsigned i = jettisonedBlocks.size(); i--;) {
BasicBlock* jettisonedBlock = jettisonedBlocks[i];
@@ -337,9 +324,9 @@ private:
// different path than secondBlock.
for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i)
- keepOperandAlive(firstBlock, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForArgument(i));
+ keepOperandAlive(firstBlock, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForArgument(i));
for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i)
- keepOperandAlive(firstBlock, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForLocal(i));
+ keepOperandAlive(firstBlock, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForLocal(i));
}
for (size_t i = 0; i < secondBlock->phis.size(); ++i)
@@ -348,7 +335,7 @@ private:
for (size_t i = 0; i < secondBlock->size(); ++i)
firstBlock->append(secondBlock->at(i));
- ASSERT(firstBlock->terminal()->isTerminal());
+ ASSERT(firstBlock->last()->isTerminal());
// Fix the predecessors of my new successors. This is tricky, since we are going to reset
// all predecessors anyway due to reachability analysis. But we need to fix the
diff --git a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h
index 0007fc9d2..a0f4856a4 100644
--- a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGCFGSimplificationPhase_h
#define DFGCFGSimplificationPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp b/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp
index 09dbf328b..5f646f3a0 100644
--- a/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
#include "DFGBasicBlockInlines.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -44,8 +44,6 @@ public:
bool run()
{
- RELEASE_ASSERT(m_graph.m_refCountState == EverythingIsLive);
-
if (m_graph.m_form == ThreadedCPS)
return false;
@@ -53,10 +51,8 @@ public:
freeUnnecessaryNodes();
m_graph.clearReplacements();
canonicalizeLocalsInBlocks();
- specialCaseArguments();
propagatePhis<LocalOperand>();
propagatePhis<ArgumentOperand>();
- computeIsFlushed();
m_graph.m_form = ThreadedCPS;
return true;
@@ -91,15 +87,13 @@ private:
node->children.setChild1(Edge());
break;
case Phantom:
- if (!node->child1()) {
- m_graph.m_allocator.free(node);
+ if (!node->child1())
continue;
- }
switch (node->child1()->op()) {
case Phi:
case SetArgument:
case SetLocal:
- node->convertPhantomToPhantomLocal();
+ node->convertToPhantomLocal();
break;
default:
ASSERT(node->child1()->hasResult());
@@ -120,37 +114,37 @@ private:
}
template<OperandKind operandKind>
- void clearVariables()
+ void clearVariablesAtHeadAndTail()
{
ASSERT(
m_block->variablesAtHead.sizeFor<operandKind>()
== m_block->variablesAtTail.sizeFor<operandKind>());
for (unsigned i = m_block->variablesAtHead.sizeFor<operandKind>(); i--;) {
- m_block->variablesAtHead.atFor<operandKind>(i) = nullptr;
- m_block->variablesAtTail.atFor<operandKind>(i) = nullptr;
+ m_block->variablesAtHead.atFor<operandKind>(i) = 0;
+ m_block->variablesAtTail.atFor<operandKind>(i) = 0;
}
}
- ALWAYS_INLINE Node* addPhiSilently(BasicBlock* block, const NodeOrigin& origin, VariableAccessData* variable)
+ ALWAYS_INLINE Node* addPhiSilently(BasicBlock* block, const CodeOrigin& codeOrigin, VariableAccessData* variable)
{
- Node* result = m_graph.addNode(SpecNone, Phi, origin, OpInfo(variable));
+ Node* result = m_graph.addNode(SpecNone, Phi, codeOrigin, OpInfo(variable));
block->phis.append(result);
return result;
}
template<OperandKind operandKind>
- ALWAYS_INLINE Node* addPhi(BasicBlock* block, const NodeOrigin& origin, VariableAccessData* variable, size_t index)
+ ALWAYS_INLINE Node* addPhi(BasicBlock* block, const CodeOrigin& codeOrigin, VariableAccessData* variable, size_t index)
{
- Node* result = addPhiSilently(block, origin, variable);
+ Node* result = addPhiSilently(block, codeOrigin, variable);
phiStackFor<operandKind>().append(PhiStackEntry(block, index, result));
return result;
}
template<OperandKind operandKind>
- ALWAYS_INLINE Node* addPhi(const NodeOrigin& origin, VariableAccessData* variable, size_t index)
+ ALWAYS_INLINE Node* addPhi(const CodeOrigin& codeOrigin, VariableAccessData* variable, size_t index)
{
- return addPhi<operandKind>(m_block, origin, variable, index);
+ return addPhi<operandKind>(m_block, codeOrigin, variable, index);
}
template<OperandKind operandKind>
@@ -187,19 +181,34 @@ private:
return;
}
+ if (variable->isCaptured()) {
+ variable->setIsLoadedFrom(true);
+ if (otherNode->op() == GetLocal)
+ otherNode = otherNode->child1().node();
+ else
+ ASSERT(otherNode->op() == SetLocal || otherNode->op() == SetArgument);
+
+ ASSERT(otherNode->op() == Phi || otherNode->op() == SetLocal || otherNode->op() == SetArgument);
+
+ // Keep this GetLocal but link it to the prior ones.
+ node->children.setChild1(Edge(otherNode));
+ m_block->variablesAtTail.atFor<operandKind>(idx) = node;
+ return;
+ }
+
if (otherNode->op() == GetLocal) {
// Replace all references to this GetLocal with otherNode.
- node->replaceWith(otherNode);
+ node->misc.replacement = otherNode;
return;
}
ASSERT(otherNode->op() == SetLocal);
- node->replaceWith(otherNode->child1().node());
+ node->misc.replacement = otherNode->child1().node();
return;
}
variable->setIsLoadedFrom(true);
- Node* phi = addPhi<operandKind>(node->origin, variable, idx);
+ Node* phi = addPhi<operandKind>(node->codeOrigin, variable, idx);
node->children.setChild1(Edge(phi));
m_block->variablesAtHead.atFor<operandKind>(idx) = phi;
m_block->variablesAtTail.atFor<operandKind>(idx) = node;
@@ -214,6 +223,11 @@ private:
canonicalizeGetLocalFor<LocalOperand>(node, variable, variable->local().toLocal());
}
+ void canonicalizeSetLocal(Node* node)
+ {
+ m_block->variablesAtTail.setOperand(node->local(), node);
+ }
+
template<NodeType nodeType, OperandKind operandKind>
void canonicalizeFlushOrPhantomLocalFor(Node* node, VariableAccessData* variable, size_t idx)
{
@@ -240,9 +254,13 @@ private:
// for the purpose of OSR. PhantomLocal(SetLocal) means: at this point I
// know that I would have read the value written by that SetLocal. This is
// redundant and inefficient, since really it just means that we want to
- // keep the last MovHinted value of that local alive.
+ // be keeping the operand to the SetLocal alive. The SetLocal may die, and
+ // we'll be fine because OSR tracks dead SetLocals.
+
+ // So we turn this into a Phantom on the child of the SetLocal.
- node->remove();
+ node->convertToPhantom();
+ node->children.setChild1(otherNode->child1());
return;
}
@@ -258,7 +276,7 @@ private:
}
variable->setIsLoadedFrom(true);
- node->children.setChild1(Edge(addPhi<operandKind>(node->origin, variable, idx)));
+ node->children.setChild1(Edge(addPhi<operandKind>(node->codeOrigin, variable, idx)));
m_block->variablesAtHead.atFor<operandKind>(idx) = node;
m_block->variablesAtTail.atFor<operandKind>(idx) = node;
}
@@ -273,9 +291,13 @@ private:
canonicalizeFlushOrPhantomLocalFor<nodeType, LocalOperand>(node, variable, variable->local().toLocal());
}
- void canonicalizeSet(Node* node)
+ void canonicalizeSetArgument(Node* node)
{
- m_block->variablesAtTail.setOperand(node->local(), node);
+ VirtualRegister local = node->local();
+ ASSERT(local.isArgument());
+ int argument = local.toArgument();
+ m_block->variablesAtHead.setArgumentFirstTime(argument, node);
+ m_block->variablesAtTail.setArgumentFirstTime(argument, node);
}
void canonicalizeLocalsInBlock()
@@ -284,8 +306,8 @@ private:
return;
ASSERT(m_block->isReachable);
- clearVariables<ArgumentOperand>();
- clearVariables<LocalOperand>();
+ clearVariablesAtHeadAndTail<ArgumentOperand>();
+ clearVariablesAtHeadAndTail<LocalOperand>();
// Assumes that all phi references have been removed. Assumes that things that
// should be live have a non-zero ref count, but doesn't assume that the ref
@@ -314,8 +336,10 @@ private:
// there ever was a SetLocal and it was followed by Flushes, then the tail
// variable will be a SetLocal and not those subsequent Flushes.
//
- // Child of GetLocal: the operation that the GetLocal keeps alive. It may be
- // a Phi from the current block. For arguments, it may be a SetArgument.
+ // Child of GetLocal: the operation that the GetLocal keeps alive. For
+ // uncaptured locals, it may be a Phi from the current block. For arguments,
+ // it may be a SetArgument. For captured locals and arguments it may also be
+ // a SetLocal.
//
// Child of SetLocal: must be a value producing node.
//
@@ -338,7 +362,7 @@ private:
break;
case SetLocal:
- canonicalizeSet(node);
+ canonicalizeSetLocal(node);
break;
case Flush:
@@ -350,7 +374,7 @@ private:
break;
case SetArgument:
- canonicalizeSet(node);
+ canonicalizeSetArgument(node);
break;
default:
@@ -369,16 +393,6 @@ private:
}
}
- void specialCaseArguments()
- {
- // Normally, a SetArgument denotes the start of a live range for a local's value on the stack.
- // But those SetArguments used for the actual arguments to the machine CodeBlock get
- // special-cased. We could have instead used two different node types - one for the arguments
- // at the prologue case, and another for the other uses. But this seemed like IR overkill.
- for (unsigned i = m_graph.m_arguments.size(); i--;)
- m_graph.block(0)->variablesAtHead.setArgumentFirstTime(i, m_graph.m_arguments[i]);
- }
-
template<OperandKind operandKind>
void propagatePhis()
{
@@ -404,7 +418,7 @@ private:
Node* variableInPrevious = predecessorBlock->variablesAtTail.atFor<operandKind>(index);
if (!variableInPrevious) {
- variableInPrevious = addPhi<operandKind>(predecessorBlock, currentPhi->origin, variable, index);
+ variableInPrevious = addPhi<operandKind>(predecessorBlock, currentPhi->codeOrigin, variable, index);
predecessorBlock->variablesAtTail.atFor<operandKind>(index) = variableInPrevious;
predecessorBlock->variablesAtHead.atFor<operandKind>(index) = variableInPrevious;
} else {
@@ -438,7 +452,7 @@ private:
continue;
}
- Node* newPhi = addPhiSilently(block, currentPhi->origin, variable);
+ Node* newPhi = addPhiSilently(block, currentPhi->codeOrigin, variable);
newPhi->children = currentPhi->children;
currentPhi->children.initialize(newPhi, variableInPrevious, 0);
}
@@ -466,56 +480,9 @@ private:
return m_localPhiStack;
}
- void computeIsFlushed()
- {
- m_graph.clearFlagsOnAllNodes(NodeIsFlushed);
-
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
- for (unsigned nodeIndex = block->size(); nodeIndex--;) {
- Node* node = block->at(nodeIndex);
- if (node->op() != Flush)
- continue;
- addFlushedLocalOp(node);
- }
- }
- while (!m_flushedLocalOpWorklist.isEmpty()) {
- Node* node = m_flushedLocalOpWorklist.takeLast();
- switch (node->op()) {
- case SetLocal:
- case SetArgument:
- break;
-
- case Flush:
- case Phi:
- ASSERT(node->flags() & NodeIsFlushed);
- DFG_NODE_DO_TO_CHILDREN(m_graph, node, addFlushedLocalEdge);
- break;
-
- default:
- DFG_CRASH(m_graph, node, "Invalid node in flush graph");
- break;
- }
- }
- }
-
- void addFlushedLocalOp(Node* node)
- {
- if (node->mergeFlags(NodeIsFlushed))
- m_flushedLocalOpWorklist.append(node);
- }
-
- void addFlushedLocalEdge(Node*, Edge edge)
- {
- addFlushedLocalOp(edge.node());
- }
-
BasicBlock* m_block;
Vector<PhiStackEntry, 128> m_argumentPhiStack;
Vector<PhiStackEntry, 128> m_localPhiStack;
- Vector<Node*, 128> m_flushedLocalOpWorklist;
};
bool performCPSRethreading(Graph& graph)
diff --git a/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h b/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h
index 755bc799d..128847f2e 100644
--- a/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGCPSRethreadingPhase_h
#define DFGCPSRethreadingPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
index a3b867616..a4e159e73 100644
--- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,65 +28,30 @@
#if ENABLE(DFG_JIT)
-#include "DFGAbstractHeap.h"
-#include "DFGBlockMapInlines.h"
-#include "DFGClobberSet.h"
-#include "DFGClobberize.h"
#include "DFGEdgeUsesStructure.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "JSCellInlines.h"
#include <array>
#include <wtf/FastBitVector.h>
namespace JSC { namespace DFG {
-// This file contains two CSE implementations: local and global. LocalCSE typically runs when we're
-// in DFG mode, i.e. we want to compile quickly. LocalCSE contains a lot of optimizations for
-// compile time. GlobalCSE, on the other hand, is fairly straight-forward. It will find more
-// optimization opportunities by virtue of being global.
+enum CSEMode { NormalCSE, StoreElimination };
-namespace {
-
-const bool verbose = false;
-
-class ClobberFilter {
+template<CSEMode cseMode>
+class CSEPhase : public Phase {
public:
- ClobberFilter(AbstractHeap heap)
- : m_heap(heap)
- {
- }
-
- bool operator()(const ImpureMap::KeyValuePairType& pair) const
- {
- return m_heap.overlaps(pair.key.heap());
- }
-
-private:
- AbstractHeap m_heap;
-};
-
-inline void clobber(ImpureMap& map, AbstractHeap heap)
-{
- ClobberFilter filter(heap);
- map.removeIf(filter);
-}
-
-class LocalCSEPhase : public Phase {
-public:
- LocalCSEPhase(Graph& graph)
- : Phase(graph, "local common subexpression elimination")
- , m_smallBlock(graph)
- , m_largeBlock(graph)
+ CSEPhase(Graph& graph)
+ : Phase(graph, cseMode == NormalCSE ? "common subexpression elimination" : "store elimination")
{
}
bool run()
{
- ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
- ASSERT(m_graph.m_form == ThreadedCPS || m_graph.m_form == LoadStore);
+ ASSERT(m_graph.m_fixpointState != BeforeFixpoint);
- bool changed = false;
+ m_changed = false;
m_graph.clearReplacements();
@@ -95,621 +60,1389 @@ public:
if (!block)
continue;
- if (block->size() <= SmallMaps::capacity)
- changed |= m_smallBlock.run(block);
- else
- changed |= m_largeBlock.run(block);
+ // All Phis need to already be marked as relevant to OSR.
+ if (!ASSERT_DISABLED) {
+ for (unsigned i = 0; i < block->phis.size(); ++i)
+ ASSERT(block->phis[i]->flags() & NodeRelevantToOSR);
+ }
+
+ for (unsigned i = block->size(); i--;) {
+ Node* node = block->at(i);
+
+ switch (node->op()) {
+ case SetLocal:
+ case GetLocal: // FIXME: The GetLocal case is only necessary until we do https://bugs.webkit.org/show_bug.cgi?id=106707.
+ node->mergeFlags(NodeRelevantToOSR);
+ break;
+ default:
+ node->clearFlags(NodeRelevantToOSR);
+ break;
+ }
+ }
}
- return changed;
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+
+ for (unsigned i = block->size(); i--;) {
+ Node* node = block->at(i);
+ if (!node->containsMovHint())
+ continue;
+
+ ASSERT(node->op() != ZombieHint);
+ node->child1()->mergeFlags(NodeRelevantToOSR);
+ }
+ }
+
+ if (m_graph.m_form == SSA) {
+ Vector<BasicBlock*> depthFirst;
+ m_graph.getBlocksInDepthFirstOrder(depthFirst);
+ for (unsigned i = 0; i < depthFirst.size(); ++i)
+ performBlockCSE(depthFirst[i]);
+ } else {
+ for (unsigned blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
+ performBlockCSE(m_graph.block(blockIndex));
+ }
+
+ return m_changed;
}
private:
- class SmallMaps {
- public:
- // This permits SmallMaps to be used for blocks that have up to 100 nodes. In practice,
- // fewer than half of the nodes in a block have pure defs, and even fewer have impure defs.
- // Thus, a capacity limit of 100 probably means that somewhere around ~40 things may end up
- // in one of these "small" list-based maps. That number still seems largeish, except that
- // the overhead of HashMaps can be quite high currently: clearing them, or even removing
- // enough things from them, deletes (or resizes) their backing store eagerly. Hence
- // HashMaps induce a lot of malloc traffic.
- static const unsigned capacity = 100;
-
- SmallMaps()
- : m_pureLength(0)
- , m_impureLength(0)
- {
+
+ unsigned endIndexForPureCSE()
+ {
+ unsigned result = m_lastSeen[m_currentNode->op()];
+ if (result == UINT_MAX)
+ result = 0;
+ else
+ result++;
+ ASSERT(result <= m_indexInBlock);
+ return result;
+ }
+
+ Node* pureCSE(Node* node)
+ {
+ Edge child1 = node->child1();
+ Edge child2 = node->child2();
+ Edge child3 = node->child3();
+
+ for (unsigned i = endIndexForPureCSE(); i--;) {
+ Node* otherNode = m_currentBlock->at(i);
+ if (otherNode == child1 || otherNode == child2 || otherNode == child3)
+ break;
+
+ if (node->op() != otherNode->op())
+ continue;
+
+ if (node->hasArithMode()) {
+ if (node->arithMode() != otherNode->arithMode())
+ continue;
+ }
+
+ Edge otherChild = otherNode->child1();
+ if (!otherChild)
+ return otherNode;
+ if (otherChild != child1)
+ continue;
+
+ otherChild = otherNode->child2();
+ if (!otherChild)
+ return otherNode;
+ if (otherChild != child2)
+ continue;
+
+ otherChild = otherNode->child3();
+ if (!otherChild)
+ return otherNode;
+ if (otherChild != child3)
+ continue;
+
+ return otherNode;
}
+ return 0;
+ }
- void clear()
- {
- m_pureLength = 0;
- m_impureLength = 0;
+ Node* int32ToDoubleCSE(Node* node)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* otherNode = m_currentBlock->at(i);
+ if (otherNode == node->child1())
+ return 0;
+ switch (otherNode->op()) {
+ case Int32ToDouble:
+ if (otherNode->child1() == node->child1())
+ return otherNode;
+ break;
+ default:
+ break;
+ }
}
+ return 0;
+ }
- void write(AbstractHeap heap)
- {
- for (unsigned i = 0; i < m_impureLength; ++i) {
- if (heap.overlaps(m_impureMap[i].key.heap()))
- m_impureMap[i--] = m_impureMap[--m_impureLength];
+ Node* constantCSE(Node* node)
+ {
+ for (unsigned i = endIndexForPureCSE(); i--;) {
+ Node* otherNode = m_currentBlock->at(i);
+ if (otherNode->op() != JSConstant)
+ continue;
+
+ if (otherNode->constantNumber() != node->constantNumber())
+ continue;
+
+ return otherNode;
+ }
+ return 0;
+ }
+
+ Node* weakConstantCSE(Node* node)
+ {
+ for (unsigned i = endIndexForPureCSE(); i--;) {
+ Node* otherNode = m_currentBlock->at(i);
+ if (otherNode->op() != WeakJSConstant)
+ continue;
+
+ if (otherNode->weakConstant() != node->weakConstant())
+ continue;
+
+ return otherNode;
+ }
+ return 0;
+ }
+
+ Node* constantStoragePointerCSE(Node* node)
+ {
+ for (unsigned i = endIndexForPureCSE(); i--;) {
+ Node* otherNode = m_currentBlock->at(i);
+ if (otherNode->op() != ConstantStoragePointer)
+ continue;
+
+ if (otherNode->storagePointer() != node->storagePointer())
+ continue;
+
+ return otherNode;
+ }
+ return 0;
+ }
+
+ Node* getCalleeLoadElimination()
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case GetCallee:
+ return node;
+ default:
+ break;
}
}
+ return 0;
+ }
- Node* addPure(PureValue value, Node* node)
- {
- for (unsigned i = m_pureLength; i--;) {
- if (m_pureMap[i].key == value)
- return m_pureMap[i].value;
+ Node* getArrayLengthElimination(Node* array)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case GetArrayLength:
+ if (node->child1() == array)
+ return node;
+ break;
+
+ case PutByValDirect:
+ case PutByVal:
+ if (!m_graph.byValIsPure(node))
+ return 0;
+ if (node->arrayMode().mayStoreToHole())
+ return 0;
+ break;
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return 0;
}
-
- ASSERT(m_pureLength < capacity);
- m_pureMap[m_pureLength++] = WTF::KeyValuePair<PureValue, Node*>(value, node);
- return nullptr;
}
-
- LazyNode findReplacement(HeapLocation location)
- {
- for (unsigned i = m_impureLength; i--;) {
- if (m_impureMap[i].key == location)
- return m_impureMap[i].value;
+ return 0;
+ }
+
+ Node* globalVarLoadElimination(WriteBarrier<Unknown>* registerPointer)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case GetGlobalVar:
+ if (node->registerPointer() == registerPointer)
+ return node;
+ break;
+ case PutGlobalVar:
+ if (node->registerPointer() == registerPointer)
+ return node->child1().node();
+ break;
+ default:
+ break;
}
- return nullptr;
+ if (m_graph.clobbersWorld(node))
+ break;
}
+ return 0;
+ }
- LazyNode addImpure(HeapLocation location, LazyNode node)
- {
- // FIXME: If we are using small maps, we must not def() derived values.
- // For now the only derived values we def() are constant-based.
- if (location.index() && !location.index().isNode())
- return nullptr;
- if (LazyNode result = findReplacement(location))
- return result;
- ASSERT(m_impureLength < capacity);
- m_impureMap[m_impureLength++] = WTF::KeyValuePair<HeapLocation, LazyNode>(location, node);
- return nullptr;
+ Node* scopedVarLoadElimination(Node* registers, int varNumber)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case GetClosureVar: {
+ if (node->child1() == registers && node->varNumber() == varNumber)
+ return node;
+ break;
+ }
+ case PutClosureVar: {
+ if (node->varNumber() != varNumber)
+ break;
+ if (node->child2() == registers)
+ return node->child3().node();
+ return 0;
+ }
+ case SetLocal: {
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (variableAccessData->isCaptured()
+ && variableAccessData->local() == static_cast<VirtualRegister>(varNumber))
+ return 0;
+ break;
+ }
+ default:
+ break;
+ }
+ if (m_graph.clobbersWorld(node))
+ break;
}
+ return 0;
+ }
- private:
- WTF::KeyValuePair<PureValue, Node*> m_pureMap[capacity];
- WTF::KeyValuePair<HeapLocation, LazyNode> m_impureMap[capacity];
- unsigned m_pureLength;
- unsigned m_impureLength;
- };
-
- class LargeMaps {
- public:
- LargeMaps()
- {
+ bool varInjectionWatchpointElimination()
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node->op() == VarInjectionWatchpoint)
+ return true;
+ if (m_graph.clobbersWorld(node))
+ break;
}
+ return false;
+ }
- void clear()
- {
- m_pureMap.clear();
- m_impureMap.clear();
+ Node* globalVarStoreElimination(WriteBarrier<Unknown>* registerPointer)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case PutGlobalVar:
+ if (node->registerPointer() == registerPointer)
+ return node;
+ break;
+
+ case GetGlobalVar:
+ if (node->registerPointer() == registerPointer)
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+ if (m_graph.clobbersWorld(node) || node->canExit())
+ return 0;
}
+ return 0;
+ }
- void write(AbstractHeap heap)
- {
- clobber(m_impureMap, heap);
+ Node* scopedVarStoreElimination(Node* scope, Node* registers, int varNumber)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case PutClosureVar: {
+ if (node->varNumber() != varNumber)
+ break;
+ if (node->child1() == scope && node->child2() == registers)
+ return node;
+ return 0;
+ }
+
+ case GetClosureVar: {
+ // Let's be conservative.
+ if (node->varNumber() == varNumber)
+ return 0;
+ break;
+ }
+
+ case GetLocal:
+ case SetLocal: {
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (variableAccessData->isCaptured()
+ && variableAccessData->local() == static_cast<VirtualRegister>(varNumber))
+ return 0;
+ break;
+ }
+
+ default:
+ break;
+ }
+ if (m_graph.clobbersWorld(node) || node->canExit())
+ return 0;
}
+ return 0;
+ }
- Node* addPure(PureValue value, Node* node)
- {
- auto result = m_pureMap.add(value, node);
- if (result.isNewEntry)
- return nullptr;
- return result.iterator->value;
+ Node* getByValLoadElimination(Node* child1, Node* child2, ArrayMode arrayMode)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1 || node == child2)
+ break;
+
+ switch (node->op()) {
+ case GetByVal:
+ if (!m_graph.byValIsPure(node))
+ return 0;
+ if (node->child1() == child1
+ && node->child2() == child2
+ && node->arrayMode().type() == arrayMode.type())
+ return node;
+ break;
+
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias: {
+ if (!m_graph.byValIsPure(node))
+ return 0;
+ // Typed arrays
+ if (arrayMode.typedArrayType() != NotTypedArray)
+ return 0;
+ if (m_graph.varArgChild(node, 0) == child1
+ && m_graph.varArgChild(node, 1) == child2
+ && node->arrayMode().type() == arrayMode.type())
+ return m_graph.varArgChild(node, 2).node();
+ // We must assume that the PutByVal will clobber the location we're getting from.
+ // FIXME: We can do better; if we know that the PutByVal is accessing an array of a
+ // different type than the GetByVal, then we know that they won't clobber each other.
+ // ... except of course for typed arrays, where all typed arrays clobber all other
+ // typed arrays! An Int32Array can alias a Float64Array for example, and so on.
+ return 0;
+ }
+ default:
+ if (m_graph.clobbersWorld(node))
+ return 0;
+ break;
+ }
}
-
- LazyNode findReplacement(HeapLocation location)
- {
- return m_impureMap.get(location);
+ return 0;
+ }
+
+ bool checkFunctionElimination(JSCell* function, Node* child1)
+ {
+ for (unsigned i = endIndexForPureCSE(); i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
+
+ if (node->op() == CheckFunction && node->child1() == child1 && node->function() == function)
+ return true;
}
+ return false;
+ }
- LazyNode addImpure(HeapLocation location, LazyNode node)
- {
- auto result = m_impureMap.add(location, node);
- if (result.isNewEntry)
- return nullptr;
- return result.iterator->value;
+ bool checkExecutableElimination(ExecutableBase* executable, Node* child1)
+ {
+ for (unsigned i = endIndexForPureCSE(); i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
+
+ if (node->op() == CheckExecutable && node->child1() == child1 && node->executable() == executable)
+ return true;
}
+ return false;
+ }
- private:
- HashMap<PureValue, Node*> m_pureMap;
- HashMap<HeapLocation, LazyNode> m_impureMap;
- };
+ bool checkStructureElimination(const StructureSet& structureSet, Node* child1)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
- template<typename Maps>
- class BlockCSE {
- public:
- BlockCSE(Graph& graph)
- : m_graph(graph)
- , m_insertionSet(graph)
- {
+ switch (node->op()) {
+ case CheckStructure:
+ if (node->child1() == child1
+ && structureSet.isSupersetOf(node->structureSet()))
+ return true;
+ break;
+
+ case StructureTransitionWatchpoint:
+ if (node->child1() == child1
+ && structureSet.contains(node->structure()))
+ return true;
+ break;
+
+ case PutStructure:
+ if (node->child1() == child1
+ && structureSet.contains(node->structureTransitionData().newStructure))
+ return true;
+ if (structureSet.contains(node->structureTransitionData().previousStructure))
+ return false;
+ break;
+
+ case PutByOffset:
+ // Setting a property cannot change the structure.
+ break;
+
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias:
+ if (m_graph.byValIsPure(node)) {
+ // If PutByVal speculates that it's accessing an array with an
+ // integer index, then it's impossible for it to cause a structure
+ // change.
+ break;
+ }
+ return false;
+
+ case Arrayify:
+ case ArrayifyToStructure:
+ // We could check if the arrayification could affect our structures.
+ // But that seems like it would take Effort.
+ return false;
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return false;
+ break;
+ }
}
+ return false;
+ }
- bool run(BasicBlock* block)
- {
- m_maps.clear();
- m_changed = false;
- m_block = block;
-
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- m_node = block->at(nodeIndex);
- m_graph.performSubstitution(m_node);
-
- if (m_node->op() == Identity) {
- m_node->replaceWith(m_node->child1().node());
- m_changed = true;
- } else {
- // This rule only makes sense for local CSE, since in SSA form we have already
- // factored the bounds check out of the PutByVal. It's kind of gross, but we
- // still have reason to believe that PutByValAlias is a good optimization and
- // that it's better to do it with a single node rather than separating out the
- // CheckInBounds.
- if (m_node->op() == PutByVal || m_node->op() == PutByValDirect) {
- HeapLocation heap;
-
- Node* base = m_graph.varArgChild(m_node, 0).node();
- Node* index = m_graph.varArgChild(m_node, 1).node();
-
- ArrayMode mode = m_node->arrayMode();
- switch (mode.type()) {
- case Array::Int32:
- if (!mode.isInBounds())
- break;
- heap = HeapLocation(
- IndexedPropertyLoc, IndexedInt32Properties, base, index);
- break;
-
- case Array::Double:
- if (!mode.isInBounds())
- break;
- heap = HeapLocation(
- IndexedPropertyLoc, IndexedDoubleProperties, base, index);
- break;
-
- case Array::Contiguous:
- if (!mode.isInBounds())
- break;
- heap = HeapLocation(
- IndexedPropertyLoc, IndexedContiguousProperties, base, index);
- break;
-
- case Array::Int8Array:
- case Array::Int16Array:
- case Array::Int32Array:
- case Array::Uint8Array:
- case Array::Uint8ClampedArray:
- case Array::Uint16Array:
- case Array::Uint32Array:
- case Array::Float32Array:
- case Array::Float64Array:
- if (!mode.isInBounds())
- break;
- heap = HeapLocation(
- IndexedPropertyLoc, TypedArrayProperties, base, index);
- break;
-
- default:
- break;
- }
-
- if (!!heap && m_maps.findReplacement(heap))
- m_node->setOp(PutByValAlias);
- }
-
- clobberize(m_graph, m_node, *this);
+ bool structureTransitionWatchpointElimination(Structure* structure, Node* child1)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
+
+ switch (node->op()) {
+ case CheckStructure:
+ if (node->child1() == child1
+ && node->structureSet().containsOnly(structure))
+ return true;
+ break;
+
+ case PutStructure:
+ ASSERT(node->structureTransitionData().previousStructure != structure);
+ break;
+
+ case PutByOffset:
+ // Setting a property cannot change the structure.
+ break;
+
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias:
+ if (m_graph.byValIsPure(node)) {
+ // If PutByVal speculates that it's accessing an array with an
+ // integer index, then it's impossible for it to cause a structure
+ // change.
+ break;
}
+ return false;
+
+ case StructureTransitionWatchpoint:
+ if (node->structure() == structure && node->child1() == child1)
+ return true;
+ break;
+
+ case Arrayify:
+ case ArrayifyToStructure:
+ // We could check if the arrayification could affect our structures.
+ // But that seems like it would take Effort.
+ return false;
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return false;
+ break;
}
-
- m_insertionSet.execute(block);
-
- return m_changed;
}
+ return false;
+ }
- void read(AbstractHeap) { }
-
- void write(AbstractHeap heap)
- {
- m_maps.write(heap);
+ Node* putStructureStoreElimination(Node* child1)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
+ switch (node->op()) {
+ case CheckStructure:
+ return 0;
+
+ case PhantomPutStructure:
+ if (node->child1() == child1) // No need to retrace our steps.
+ return 0;
+ break;
+
+ case PutStructure:
+ if (node->child1() == child1)
+ return node;
+ break;
+
+ // PutStructure needs to execute if we GC. Hence this needs to
+ // be careful with respect to nodes that GC.
+ case CreateArguments:
+ case TearOffArguments:
+ case NewFunctionNoCheck:
+ case NewFunction:
+ case NewFunctionExpression:
+ case CreateActivation:
+ case TearOffActivation:
+ case ToPrimitive:
+ case NewRegexp:
+ case NewArrayBuffer:
+ case NewArray:
+ case NewObject:
+ case CreateThis:
+ case AllocatePropertyStorage:
+ case ReallocatePropertyStorage:
+ case TypeOf:
+ case ToString:
+ case NewStringObject:
+ case MakeRope:
+ case NewTypedArray:
+ return 0;
+
+ // This either exits, causes a GC (lazy string allocation), or clobbers
+ // the world. The chances of it not doing any of those things are so
+ // slim that we might as well not even try to reason about it.
+ case GetByVal:
+ return 0;
+
+ case GetIndexedPropertyStorage:
+ if (node->arrayMode().getIndexedPropertyStorageMayTriggerGC())
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+ if (m_graph.clobbersWorld(node) || node->canExit())
+ return 0;
+ if (edgesUseStructure(m_graph, node))
+ return 0;
}
-
- void def(PureValue value)
- {
- Node* match = m_maps.addPure(value, m_node);
- if (!match)
- return;
+ return 0;
+ }
+
+ Node* getByOffsetLoadElimination(unsigned identifierNumber, Node* child1)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
- m_node->replaceWith(match);
- m_changed = true;
+ switch (node->op()) {
+ case GetByOffset:
+ if (node->child1() == child1
+ && m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber == identifierNumber)
+ return node;
+ break;
+
+ case PutByOffset:
+ if (m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber == identifierNumber) {
+ if (node->child1() == child1) // Must be same property storage.
+ return node->child3().node();
+ return 0;
+ }
+ break;
+
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias:
+ if (m_graph.byValIsPure(node)) {
+ // If PutByVal speculates that it's accessing an array with an
+ // integer index, then it's impossible for it to cause a structure
+ // change.
+ break;
+ }
+ return 0;
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return 0;
+ break;
+ }
}
+ return 0;
+ }
- void def(HeapLocation location, LazyNode value)
- {
- LazyNode match = m_maps.addImpure(location, value);
- if (!match)
- return;
-
- if (m_node->op() == GetLocal) {
- // Usually the CPS rethreading phase does this. But it's OK for us to mess with
- // locals so long as:
- //
- // - We dethread the graph. Any changes we make may invalidate the assumptions of
- // our CPS form, particularly if this GetLocal is linked to the variablesAtTail.
- //
- // - We don't introduce a Phantom for the child of the GetLocal. This wouldn't be
- // totally wrong but it would pessimize the code. Just because there is a
- // GetLocal doesn't mean that the child was live. Simply rerouting the all uses
- // of this GetLocal will preserve the live-at-exit information just fine.
- //
- // We accomplish the latter by just clearing the child; then the Phantom that we
- // introduce won't have children and so it will eventually just be deleted.
-
- m_node->child1() = Edge();
- m_graph.dethread();
- }
-
- if (value.isNode() && value.asNode() == m_node) {
- match.ensureIsNode(m_insertionSet, m_block, 0)->owner = m_block;
- ASSERT(match.isNode());
- m_node->replaceWith(match.asNode());
- m_changed = true;
+ Node* putByOffsetStoreElimination(unsigned identifierNumber, Node* child1)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
+
+ switch (node->op()) {
+ case GetByOffset:
+ if (m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber == identifierNumber)
+ return 0;
+ break;
+
+ case PutByOffset:
+ if (m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber == identifierNumber) {
+ if (node->child1() == child1) // Must be same property storage.
+ return node;
+ return 0;
+ }
+ break;
+
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias:
+ case GetByVal:
+ if (m_graph.byValIsPure(node)) {
+ // If PutByVal speculates that it's accessing an array with an
+ // integer index, then it's impossible for it to cause a structure
+ // change.
+ break;
+ }
+ return 0;
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return 0;
+ break;
}
+ if (node->canExit())
+ return 0;
}
+ return 0;
+ }
- private:
- Graph& m_graph;
-
- bool m_changed;
- Node* m_node;
- BasicBlock* m_block;
+ Node* getPropertyStorageLoadElimination(Node* child1)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
+
+ switch (node->op()) {
+ case GetButterfly:
+ if (node->child1() == child1)
+ return node;
+ break;
+
+ case AllocatePropertyStorage:
+ case ReallocatePropertyStorage:
+ // If we can cheaply prove this is a change to our object's storage, we
+ // can optimize and use its result.
+ if (node->child1() == child1)
+ return node;
+ // Otherwise, we currently can't prove that this doesn't change our object's
+ // storage, so we conservatively assume that it may change the storage
+ // pointer of any object, including ours.
+ return 0;
+
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias:
+ if (m_graph.byValIsPure(node)) {
+ // If PutByVal speculates that it's accessing an array with an
+ // integer index, then it's impossible for it to cause a structure
+ // change.
+ break;
+ }
+ return 0;
+
+ case Arrayify:
+ case ArrayifyToStructure:
+ // We could check if the arrayification could affect our butterfly.
+ // But that seems like it would take Effort.
+ return 0;
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return 0;
+ break;
+ }
+ }
+ return 0;
+ }
- Maps m_maps;
+ bool checkArrayElimination(Node* child1, ArrayMode arrayMode)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
- InsertionSet m_insertionSet;
- };
+ switch (node->op()) {
+ case CheckArray:
+ if (node->child1() == child1 && node->arrayMode() == arrayMode)
+ return true;
+ break;
+
+ case Arrayify:
+ case ArrayifyToStructure:
+ // We could check if the arrayification could affect our array.
+ // But that seems like it would take Effort.
+ return false;
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return false;
+ break;
+ }
+ }
+ return false;
+ }
- BlockCSE<SmallMaps> m_smallBlock;
- BlockCSE<LargeMaps> m_largeBlock;
-};
+ Node* getIndexedPropertyStorageLoadElimination(Node* child1, ArrayMode arrayMode)
+ {
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
-class GlobalCSEPhase : public Phase {
-public:
- GlobalCSEPhase(Graph& graph)
- : Phase(graph, "global common subexpression elimination")
- , m_impureDataMap(graph)
- , m_insertionSet(graph)
+ switch (node->op()) {
+ case GetIndexedPropertyStorage: {
+ if (node->child1() == child1 && node->arrayMode() == arrayMode)
+ return node;
+ break;
+ }
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return 0;
+ break;
+ }
+ }
+ return 0;
+ }
+
+ Node* getTypedArrayByteOffsetLoadElimination(Node* child1)
{
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == child1)
+ break;
+
+ switch (node->op()) {
+ case GetTypedArrayByteOffset: {
+ if (node->child1() == child1)
+ return node;
+ break;
+ }
+
+ default:
+ if (m_graph.clobbersWorld(node))
+ return 0;
+ break;
+ }
+ }
+ return 0;
}
- bool run()
+ Node* getMyScopeLoadElimination()
{
- ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
- ASSERT(m_graph.m_form == SSA);
-
- m_graph.initializeNodeOwners();
- m_graph.m_dominators.computeIfNecessary(m_graph);
-
- m_preOrder = m_graph.blocksInPreOrder();
-
- // First figure out what gets clobbered by blocks. Node that this uses the preOrder list
- // for convenience only.
- for (unsigned i = m_preOrder.size(); i--;) {
- m_block = m_preOrder[i];
- m_impureData = &m_impureDataMap[m_block];
- for (unsigned nodeIndex = m_block->size(); nodeIndex--;)
- addWrites(m_graph, m_block->at(nodeIndex), m_impureData->writes);
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case CreateActivation:
+ // This may cause us to return a different scope.
+ return 0;
+ case GetMyScope:
+ return node;
+ default:
+ break;
+ }
}
-
- // Based on my experience doing this before, what follows might have to be made iterative.
- // Right now it doesn't have to be iterative because everything is dominator-bsed. But when
- // validation is enabled, we check if iterating would find new CSE opportunities.
-
- bool changed = iterate();
-
- // FIXME: It should be possible to assert that CSE will not find any new opportunities if you
- // run it a second time. Unfortunately, we cannot assert this right now. Note that if we did
- // this, we'd have to first reset all of our state.
- // https://bugs.webkit.org/show_bug.cgi?id=145853
-
- return changed;
+ return 0;
}
- bool iterate()
+ Node* getLocalLoadElimination(VirtualRegister local, Node*& relevantLocalOp, bool careAboutClobbering)
{
- if (verbose)
- dataLog("Performing iteration.\n");
+ relevantLocalOp = 0;
- m_changed = false;
- m_graph.clearReplacements();
-
- for (unsigned i = 0; i < m_preOrder.size(); ++i) {
- m_block = m_preOrder[i];
- m_impureData = &m_impureDataMap[m_block];
- m_writesSoFar.clear();
-
- if (verbose)
- dataLog("Processing block ", *m_block, ":\n");
-
- for (unsigned nodeIndex = 0; nodeIndex < m_block->size(); ++nodeIndex) {
- m_nodeIndex = nodeIndex;
- m_node = m_block->at(nodeIndex);
- if (verbose)
- dataLog(" Looking at node ", m_node, ":\n");
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case GetLocal:
+ if (node->local() == local) {
+ relevantLocalOp = node;
+ return node;
+ }
+ break;
- m_graph.performSubstitution(m_node);
+ case GetLocalUnlinked:
+ if (node->unlinkedLocal() == local) {
+ relevantLocalOp = node;
+ return node;
+ }
+ break;
- if (m_node->op() == Identity) {
- m_node->replaceWith(m_node->child1().node());
- m_changed = true;
- } else
- clobberize(m_graph, m_node, *this);
+ case SetLocal:
+ if (node->local() == local) {
+ relevantLocalOp = node;
+ return node->child1().node();
+ }
+ break;
+
+ case GetClosureVar:
+ case PutClosureVar:
+ if (static_cast<VirtualRegister>(node->varNumber()) == local)
+ return 0;
+ break;
+
+ default:
+ if (careAboutClobbering && m_graph.clobbersWorld(node))
+ return 0;
+ break;
}
-
- m_insertionSet.execute(m_block);
-
- m_impureData->didVisit = true;
+ }
+ return 0;
+ }
+
+ struct SetLocalStoreEliminationResult {
+ SetLocalStoreEliminationResult()
+ : mayBeAccessed(false)
+ , mayExit(false)
+ , mayClobberWorld(false)
+ {
}
- return m_changed;
+ bool mayBeAccessed;
+ bool mayExit;
+ bool mayClobberWorld;
+ };
+ SetLocalStoreEliminationResult setLocalStoreElimination(
+ VirtualRegister local, Node* expectedNode)
+ {
+ SetLocalStoreEliminationResult result;
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* node = m_currentBlock->at(i);
+ switch (node->op()) {
+ case GetLocal:
+ case Flush:
+ if (node->local() == local)
+ result.mayBeAccessed = true;
+ break;
+
+ case GetLocalUnlinked:
+ if (node->unlinkedLocal() == local)
+ result.mayBeAccessed = true;
+ break;
+
+ case SetLocal: {
+ if (node->local() != local)
+ break;
+ if (node != expectedNode)
+ result.mayBeAccessed = true;
+ return result;
+ }
+
+ case GetClosureVar:
+ case PutClosureVar:
+ if (static_cast<VirtualRegister>(node->varNumber()) == local)
+ result.mayBeAccessed = true;
+ break;
+
+ case GetMyScope:
+ case SkipTopScope:
+ if (node->codeOrigin.inlineCallFrame)
+ break;
+ if (m_graph.uncheckedActivationRegister() == local)
+ result.mayBeAccessed = true;
+ break;
+
+ case CheckArgumentsNotCreated:
+ case GetMyArgumentsLength:
+ case GetMyArgumentsLengthSafe:
+ if (m_graph.uncheckedArgumentsRegisterFor(node->codeOrigin) == local)
+ result.mayBeAccessed = true;
+ break;
+
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValSafe:
+ result.mayBeAccessed = true;
+ break;
+
+ case GetByVal:
+ // If this is accessing arguments then it's potentially accessing locals.
+ if (node->arrayMode().type() == Array::Arguments)
+ result.mayBeAccessed = true;
+ break;
+
+ case CreateArguments:
+ case TearOffActivation:
+ case TearOffArguments:
+ // If an activation is being torn off then it means that captured variables
+ // are live. We could be clever here and check if the local qualifies as an
+ // argument register. But that seems like it would buy us very little since
+ // any kind of tear offs are rare to begin with.
+ result.mayBeAccessed = true;
+ break;
+
+ default:
+ break;
+ }
+ result.mayExit |= node->canExit();
+ result.mayClobberWorld |= m_graph.clobbersWorld(node);
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ // Be safe in release mode.
+ result.mayBeAccessed = true;
+ return result;
}
-
- void read(AbstractHeap) { }
- void write(AbstractHeap heap)
+ void eliminateIrrelevantPhantomChildren(Node* node)
{
- clobber(m_impureData->availableAtTail, heap);
- m_writesSoFar.add(heap);
- if (verbose)
- dataLog(" Clobbered, new tail map: ", mapDump(m_impureData->availableAtTail), "\n");
+ ASSERT(node->op() == Phantom);
+ for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
+ Edge edge = node->children.child(i);
+ if (!edge)
+ continue;
+ if (edge.useKind() != UntypedUse)
+ continue; // Keep the type check.
+ if (edge->flags() & NodeRelevantToOSR)
+ continue;
+
+ node->children.removeEdge(i--);
+ m_changed = true;
+ }
}
- void def(PureValue value)
+ bool setReplacement(Node* replacement)
{
- // With pure values we do not have to worry about the possibility of some control flow path
- // clobbering the value. So, we just search for all of the like values that have been
- // computed. We pick one that is in a block that dominates ours. Note that this means that
- // a PureValue will map to a list of nodes, since there may be many places in the control
- // flow graph that compute a value but only one of them that dominates us. We may build up
- // a large list of nodes that compute some value in the case of gnarly control flow. This
- // is probably OK.
+ if (!replacement)
+ return false;
- auto result = m_pureValues.add(value, Vector<Node*>());
- if (result.isNewEntry) {
- result.iterator->value.append(m_node);
- return;
- }
+ m_currentNode->convertToPhantom();
+ eliminateIrrelevantPhantomChildren(m_currentNode);
- for (unsigned i = result.iterator->value.size(); i--;) {
- Node* candidate = result.iterator->value[i];
- if (m_graph.m_dominators.dominates(candidate->owner, m_block)) {
- m_node->replaceWith(candidate);
- m_changed = true;
- return;
- }
- }
+ // At this point we will eliminate all references to this node.
+ m_currentNode->misc.replacement = replacement;
+
+ m_changed = true;
- result.iterator->value.append(m_node);
+ return true;
}
- LazyNode findReplacement(HeapLocation location)
+ void eliminate()
{
- // At this instant, our "availableAtTail" reflects the set of things that are available in
- // this block so far. We check this map to find block-local CSE opportunities before doing
- // a global search.
- LazyNode match = m_impureData->availableAtTail.get(location);
- if (!!match) {
- if (verbose)
- dataLog(" Found local match: ", match, "\n");
- return match;
- }
+ ASSERT(m_currentNode->mustGenerate());
+ m_currentNode->convertToPhantom();
+ eliminateIrrelevantPhantomChildren(m_currentNode);
- // If it's not available at this point in the block, and at some prior point in the block
- // we have clobbered this heap location, then there is no point in doing a global search.
- if (m_writesSoFar.overlaps(location.heap())) {
- if (verbose)
- dataLog(" Not looking globally because of local clobber: ", m_writesSoFar, "\n");
- return nullptr;
- }
+ m_changed = true;
+ }
+
+ void eliminate(Node* node, NodeType phantomType = Phantom)
+ {
+ if (!node)
+ return;
+ ASSERT(node->mustGenerate());
+ node->setOpAndDefaultFlags(phantomType);
+ if (phantomType == Phantom)
+ eliminateIrrelevantPhantomChildren(node);
- // This perfoms a backward search over the control flow graph to find some possible
- // non-local def() that matches our heap location. Such a match is only valid if there does
- // not exist any path from that def() to our block that contains a write() that overlaps
- // our heap. This algorithm looks for both of these things (the matching def and the
- // overlapping writes) in one backwards DFS pass.
- //
- // This starts by looking at the starting block's predecessors, and then it continues along
- // their predecessors. As soon as this finds a possible def() - one that defines the heap
- // location we want while dominating our starting block - it assumes that this one must be
- // the match. It then lets the DFS over predecessors complete, but it doesn't add the
- // def()'s predecessors; this ensures that any blocks we visit thereafter are on some path
- // from the def() to us. As soon as the DFG finds a write() that overlaps the location's
- // heap, it stops, assuming that there is no possible match. Note that the write() case may
- // trigger before we find a def(), or after. Either way, the write() case causes this
- // function to immediately return nullptr.
- //
- // If the write() is found before we find the def(), then we know that any def() we would
- // find would have a path to us that trips over the write() and hence becomes invalid. This
- // is just a direct outcome of us looking for a def() that dominates us. Given a block A
- // that dominates block B - so that A is the one that would have the def() and B is our
- // starting block - we know that any other block must either be on the path from A to B, or
- // it must be on a path from the root to A, but not both. So, if we haven't found A yet but
- // we already have found a block C that has a write(), then C must be on some path from A
- // to B, which means that A's def() is invalid for our purposes. Hence, before we find the
- // def(), stopping on write() is the right thing to do.
- //
- // Stopping on write() is also the right thing to do after we find the def(). After we find
- // the def(), we don't add that block's predecessors to the search worklist. That means
- // that henceforth the only blocks we will see in the search are blocks on the path from
- // the def() to us. If any such block has a write() that clobbers our heap then we should
- // give up.
- //
- // Hence this graph search algorithm ends up being deceptively simple: any overlapping
- // write() causes us to immediately return nullptr, and a matching def() means that we just
- // record it and neglect to visit its precessors.
+ m_changed = true;
+ }
+
+ void performNodeCSE(Node* node)
+ {
+ if (cseMode == NormalCSE)
+ m_graph.performSubstitution(node);
- Vector<BasicBlock*, 8> worklist;
- Vector<BasicBlock*, 8> seenList;
- BitVector seen;
+ switch (node->op()) {
- for (unsigned i = m_block->predecessors.size(); i--;) {
- BasicBlock* predecessor = m_block->predecessors[i];
- if (!seen.get(predecessor->index)) {
- worklist.append(predecessor);
- seen.set(predecessor->index);
- }
+ case Identity:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(node->child1().node());
+ break;
+
+ // Handle the pure nodes. These nodes never have any side-effects.
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case BitRShift:
+ case BitLShift:
+ case BitURShift:
+ case ArithAdd:
+ case ArithSub:
+ case ArithNegate:
+ case ArithMul:
+ case ArithMod:
+ case ArithDiv:
+ case ArithAbs:
+ case ArithMin:
+ case ArithMax:
+ case ArithSqrt:
+ case ArithSin:
+ case ArithCos:
+ case StringCharAt:
+ case StringCharCodeAt:
+ case IsUndefined:
+ case IsBoolean:
+ case IsNumber:
+ case IsString:
+ case IsObject:
+ case IsFunction:
+ case DoubleAsInt32:
+ case LogicalNot:
+ case SkipTopScope:
+ case SkipScope:
+ case GetClosureRegisters:
+ case GetScope:
+ case TypeOf:
+ case CompareEqConstant:
+ case ValueToInt32:
+ case MakeRope:
+ case Int52ToDouble:
+ case Int52ToValue:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(pureCSE(node));
+ break;
+
+ case Int32ToDouble:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(int32ToDoubleCSE(node));
+ break;
+
+ case GetCallee:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(getCalleeLoadElimination());
+ break;
+
+ case GetLocal: {
+ if (cseMode == StoreElimination)
+ break;
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (!variableAccessData->isCaptured())
+ break;
+ Node* relevantLocalOp;
+ Node* possibleReplacement = getLocalLoadElimination(variableAccessData->local(), relevantLocalOp, variableAccessData->isCaptured());
+ if (!relevantLocalOp)
+ break;
+ if (relevantLocalOp->op() != GetLocalUnlinked
+ && relevantLocalOp->variableAccessData() != variableAccessData)
+ break;
+ Node* phi = node->child1().node();
+ if (!setReplacement(possibleReplacement))
+ break;
+
+ m_graph.dethread();
+
+ // If we replace a GetLocal with a GetLocalUnlinked, then turn the GetLocalUnlinked
+ // into a GetLocal.
+ if (relevantLocalOp->op() == GetLocalUnlinked)
+ relevantLocalOp->convertToGetLocal(variableAccessData, phi);
+
+ m_changed = true;
+ break;
}
-
- while (!worklist.isEmpty()) {
- BasicBlock* block = worklist.takeLast();
- seenList.append(block);
-
- if (verbose)
- dataLog(" Searching in block ", *block, "\n");
- ImpureBlockData& data = m_impureDataMap[block];
-
- // We require strict domination because this would only see things in our own block if
- // they came *after* our position in the block. Clearly, while our block dominates
- // itself, the things in the block after us don't dominate us.
- if (m_graph.m_dominators.strictlyDominates(block, m_block)) {
- if (verbose)
- dataLog(" It strictly dominates.\n");
- DFG_ASSERT(m_graph, m_node, data.didVisit);
- DFG_ASSERT(m_graph, m_node, !match);
- if (verbose)
- dataLog(" Availability map: ", mapDump(data.availableAtTail), "\n");
- match = data.availableAtTail.get(location);
- if (verbose)
- dataLog(" Availability: ", match, "\n");
- if (!!match) {
- // Don't examine the predecessors of a match. At this point we just want to
- // establish that other blocks on the path from here to there don't clobber
- // the location we're interested in.
- continue;
- }
+
+ case GetLocalUnlinked: {
+ if (cseMode == StoreElimination)
+ break;
+ Node* relevantLocalOpIgnored;
+ setReplacement(getLocalLoadElimination(node->unlinkedLocal(), relevantLocalOpIgnored, true));
+ break;
+ }
+
+ case Flush: {
+ if (m_graph.m_form == SSA) {
+ // FIXME: Enable Flush store elimination in SSA form.
+ // https://bugs.webkit.org/show_bug.cgi?id=125429
+ break;
+ }
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ VirtualRegister local = variableAccessData->local();
+ Node* replacement = node->child1().node();
+ if (replacement->op() != SetLocal)
+ break;
+ ASSERT(replacement->variableAccessData() == variableAccessData);
+ // FIXME: We should be able to remove SetLocals that can exit; we just need
+ // to replace them with appropriate type checks.
+ if (cseMode == NormalCSE) {
+ // Need to be conservative at this time; if the SetLocal has any chance of performing
+ // any speculations then we cannot do anything.
+ FlushFormat format = variableAccessData->flushFormat();
+ ASSERT(format != DeadFlush);
+ if (format != FlushedJSValue)
+ break;
+ } else {
+ if (replacement->canExit())
+ break;
}
+ SetLocalStoreEliminationResult result =
+ setLocalStoreElimination(local, replacement);
+ if (result.mayBeAccessed || result.mayClobberWorld)
+ break;
+ ASSERT(replacement->op() == SetLocal);
+ // FIXME: Investigate using mayExit as a further optimization.
+ node->convertToPhantom();
+ Node* dataNode = replacement->child1().node();
+ ASSERT(dataNode->hasResult());
+ node->child1() = Edge(dataNode);
+ m_graph.dethread();
+ m_changed = true;
+ break;
+ }
+
+ case JSConstant:
+ if (cseMode == StoreElimination)
+ break;
+ // This is strange, but necessary. Some phases will convert nodes to constants,
+ // which may result in duplicated constants. We use CSE to clean this up.
+ setReplacement(constantCSE(node));
+ break;
- if (verbose)
- dataLog(" Dealing with write set ", data.writes, "\n");
- if (data.writes.overlaps(location.heap())) {
- if (verbose)
- dataLog(" Clobbered.\n");
- return nullptr;
+ case WeakJSConstant:
+ if (cseMode == StoreElimination)
+ break;
+ // FIXME: have CSE for weak constants against strong constants and vice-versa.
+ setReplacement(weakConstantCSE(node));
+ break;
+
+ case ConstantStoragePointer:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(constantStoragePointerCSE(node));
+ break;
+
+ case GetArrayLength:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(getArrayLengthElimination(node->child1().node()));
+ break;
+
+ case GetMyScope:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(getMyScopeLoadElimination());
+ break;
+
+ // Handle nodes that are conditionally pure: these are pure, and can
+ // be CSE'd, so long as the prediction is the one we want.
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareEq: {
+ if (cseMode == StoreElimination)
+ break;
+ if (m_graph.isPredictedNumerical(node)) {
+ Node* replacement = pureCSE(node);
+ if (replacement && m_graph.isPredictedNumerical(replacement))
+ setReplacement(replacement);
}
+ break;
+ }
- for (unsigned i = block->predecessors.size(); i--;) {
- BasicBlock* predecessor = block->predecessors[i];
- if (!seen.get(predecessor->index)) {
- worklist.append(predecessor);
- seen.set(predecessor->index);
- }
+ // Finally handle heap accesses. These are not quite pure, but we can still
+ // optimize them provided that some subtle conditions are met.
+ case GetGlobalVar:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(globalVarLoadElimination(node->registerPointer()));
+ break;
+
+ case GetClosureVar: {
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(scopedVarLoadElimination(node->child1().node(), node->varNumber()));
+ break;
+ }
+
+ case VarInjectionWatchpoint:
+ if (cseMode == StoreElimination)
+ break;
+ if (varInjectionWatchpointElimination())
+ eliminate();
+ break;
+
+ case PutGlobalVar:
+ if (cseMode == NormalCSE)
+ break;
+ eliminate(globalVarStoreElimination(node->registerPointer()));
+ break;
+
+ case PutClosureVar: {
+ if (cseMode == NormalCSE)
+ break;
+ eliminate(scopedVarStoreElimination(node->child1().node(), node->child2().node(), node->varNumber()));
+ break;
+ }
+
+ case GetByVal:
+ if (cseMode == StoreElimination)
+ break;
+ if (m_graph.byValIsPure(node))
+ setReplacement(getByValLoadElimination(node->child1().node(), node->child2().node(), node->arrayMode()));
+ break;
+
+ case PutByValDirect:
+ case PutByVal: {
+ if (cseMode == StoreElimination)
+ break;
+ Edge child1 = m_graph.varArgChild(node, 0);
+ Edge child2 = m_graph.varArgChild(node, 1);
+ if (node->arrayMode().canCSEStorage()) {
+ Node* replacement = getByValLoadElimination(child1.node(), child2.node(), node->arrayMode());
+ if (!replacement)
+ break;
+ node->setOp(PutByValAlias);
}
+ break;
+ }
+
+ case CheckStructure:
+ if (cseMode == StoreElimination)
+ break;
+ if (checkStructureElimination(node->structureSet(), node->child1().node()))
+ eliminate();
+ break;
+
+ case StructureTransitionWatchpoint:
+ if (cseMode == StoreElimination)
+ break;
+ if (structureTransitionWatchpointElimination(node->structure(), node->child1().node()))
+ eliminate();
+ break;
+
+ case PutStructure:
+ if (cseMode == NormalCSE)
+ break;
+ eliminate(putStructureStoreElimination(node->child1().node()), PhantomPutStructure);
+ break;
+
+ case CheckFunction:
+ if (cseMode == StoreElimination)
+ break;
+ if (checkFunctionElimination(node->function(), node->child1().node()))
+ eliminate();
+ break;
+
+ case CheckExecutable:
+ if (cseMode == StoreElimination)
+ break;
+ if (checkExecutableElimination(node->executable(), node->child1().node()))
+ eliminate();
+ break;
+
+ case CheckArray:
+ if (cseMode == StoreElimination)
+ break;
+ if (checkArrayElimination(node->child1().node(), node->arrayMode()))
+ eliminate();
+ break;
+
+ case GetIndexedPropertyStorage: {
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(getIndexedPropertyStorageLoadElimination(node->child1().node(), node->arrayMode()));
+ break;
+ }
+
+ case GetTypedArrayByteOffset: {
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(getTypedArrayByteOffsetLoadElimination(node->child1().node()));
+ break;
+ }
+
+ case GetButterfly:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(getPropertyStorageLoadElimination(node->child1().node()));
+ break;
+
+ case GetByOffset:
+ if (cseMode == StoreElimination)
+ break;
+ setReplacement(getByOffsetLoadElimination(m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber, node->child1().node()));
+ break;
+
+ case PutByOffset:
+ if (cseMode == NormalCSE)
+ break;
+ eliminate(putByOffsetStoreElimination(m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber, node->child1().node()));
+ break;
+
+ case Phantom:
+ // FIXME: we ought to remove Phantom's that have no children.
+
+ eliminateIrrelevantPhantomChildren(node);
+ break;
+
+ default:
+ // do nothing.
+ break;
}
- if (!match)
- return nullptr;
-
- // Cache the results for next time. We cache them both for this block and for all of our
- // predecessors, since even though we've already visited our predecessors, our predecessors
- // probably have successors other than us.
- // FIXME: Consider caching failed searches as well, when match is null. It's not clear that
- // the reduction in compile time would warrant the increase in complexity, though.
- // https://bugs.webkit.org/show_bug.cgi?id=134876
- for (BasicBlock* block : seenList)
- m_impureDataMap[block].availableAtTail.add(location, match);
- m_impureData->availableAtTail.add(location, match);
-
- return match;
+ m_lastSeen[node->op()] = m_indexInBlock;
}
- void def(HeapLocation location, LazyNode value)
+ void performBlockCSE(BasicBlock* block)
{
- if (verbose)
- dataLog(" Got heap location def: ", location, " -> ", value, "\n");
-
- LazyNode match = findReplacement(location);
+ if (!block)
+ return;
+ if (!block->isReachable)
+ return;
- if (verbose)
- dataLog(" Got match: ", match, "\n");
+ m_currentBlock = block;
+ for (unsigned i = 0; i < LastNodeType; ++i)
+ m_lastSeen[i] = UINT_MAX;
- if (!match) {
- if (verbose)
- dataLog(" Adding at-tail mapping: ", location, " -> ", value, "\n");
- auto result = m_impureData->availableAtTail.add(location, value);
- ASSERT_UNUSED(result, result.isNewEntry);
- return;
+ for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
+ m_currentNode = block->at(m_indexInBlock);
+ performNodeCSE(m_currentNode);
}
-
- if (value.isNode() && value.asNode() == m_node) {
- if (!match.isNode()) {
- // We need to properly record the constant in order to use an existing one if applicable.
- // This ensures that re-running GCSE will not find new optimizations.
- match.ensureIsNode(m_insertionSet, m_block, m_nodeIndex)->owner = m_block;
- auto result = m_pureValues.add(PureValue(match.asNode(), match->constant()), Vector<Node*>());
- bool replaced = false;
- if (!result.isNewEntry) {
- for (unsigned i = result.iterator->value.size(); i--;) {
- Node* candidate = result.iterator->value[i];
- if (m_graph.m_dominators.dominates(candidate->owner, m_block)) {
- ASSERT(candidate);
- match->replaceWith(candidate);
- match.setNode(candidate);
- replaced = true;
- break;
- }
- }
- }
- if (!replaced)
- result.iterator->value.append(match.asNode());
- }
- ASSERT(match.asNode());
- m_node->replaceWith(match.asNode());
- m_changed = true;
+
+ if (!ASSERT_DISABLED && cseMode == StoreElimination) {
+ // Nobody should have replacements set.
+ for (unsigned i = 0; i < block->size(); ++i)
+ ASSERT(!block->at(i)->misc.replacement);
}
}
- struct ImpureBlockData {
- ImpureBlockData()
- : didVisit(false)
- {
- }
-
- ClobberSet writes;
- ImpureMap availableAtTail;
- bool didVisit;
- };
-
- Vector<BasicBlock*> m_preOrder;
-
- PureMultiMap m_pureValues;
- BlockMap<ImpureBlockData> m_impureDataMap;
-
- BasicBlock* m_block;
- Node* m_node;
- unsigned m_nodeIndex;
- ImpureBlockData* m_impureData;
- ClobberSet m_writesSoFar;
- InsertionSet m_insertionSet;
-
- bool m_changed;
+ BasicBlock* m_currentBlock;
+ Node* m_currentNode;
+ unsigned m_indexInBlock;
+ std::array<unsigned, LastNodeType> m_lastSeen;
+ bool m_changed; // Only tracks changes that have a substantive effect on other optimizations.
};
-} // anonymous namespace
-
-bool performLocalCSE(Graph& graph)
+bool performCSE(Graph& graph)
{
- SamplingRegion samplingRegion("DFG LocalCSE Phase");
- return runPhase<LocalCSEPhase>(graph);
+ SamplingRegion samplingRegion("DFG CSE Phase");
+ return runPhase<CSEPhase<NormalCSE>>(graph);
}
-bool performGlobalCSE(Graph& graph)
+bool performStoreElimination(Graph& graph)
{
- SamplingRegion samplingRegion("DFG GlobalCSE Phase");
- return runPhase<GlobalCSEPhase>(graph);
+ SamplingRegion samplingRegion("DFG Store Elimination Phase");
+ return runPhase<CSEPhase<StoreElimination>>(graph);
}
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
+
+
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.h b/Source/JavaScriptCore/dfg/DFGCSEPhase.h
index 562fd9bca..1dfd2b7dd 100644
--- a/Source/JavaScriptCore/dfg/DFGCSEPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGCSEPhase_h
#define DFGCSEPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -34,20 +36,14 @@ namespace JSC { namespace DFG {
class Graph;
-// Block-local common subexpression elimination. It uses clobberize() for heap
-// modeling, which is quite precise. This phase is known to produce big wins on
-// a few benchmarks, and is relatively cheap to run.
-//
-// Note that this phase also gets rid of Identity nodes, which means that it's
-// currently not an optional phase. Basically, DFG IR doesn't have use-lists,
-// so there is no instantaneous replaceAllUsesWith operation. Instead, you turn
-// a node into an Identity and wait for CSE to clean it up.
-bool performLocalCSE(Graph&);
-
-// Same, but global. Only works for SSA. This will find common subexpressions
-// both in the same block and in any block that dominates the current block. It
-// has no limits on how far it will look for load-elimination opportunities.
-bool performGlobalCSE(Graph&);
+// Block-local common subexpression elimination. This is an optional phase, but
+// it is rather profitable. It has fairly accurate heap modeling and will match
+// a wide range of subexpression similarities. It's known to produce big wins
+// on a few benchmarks, and is relatively cheap to run.
+bool performCSE(Graph&);
+
+// Perform just block-local store elimination.
+bool performStoreElimination(Graph&);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h
index 9780e5029..ac7dc45fe 100644
--- a/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h
+++ b/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h
@@ -26,6 +26,8 @@
#ifndef DFGCallArrayAllocatorSlowPathGenerator_h
#define DFGCallArrayAllocatorSlowPathGenerator_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -96,7 +98,7 @@ protected:
for (unsigned i = 0; i < m_plans.size(); ++i)
jit->silentSpill(m_plans[i]);
GPRReg scratchGPR = AssemblyHelpers::selectScratchGPR(m_sizeGPR);
- MacroAssembler::Jump bigLength = jit->m_jit.branch32(MacroAssembler::AboveOrEqual, m_sizeGPR, MacroAssembler::TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
+ MacroAssembler::Jump bigLength = jit->m_jit.branch32(MacroAssembler::AboveOrEqual, m_sizeGPR, MacroAssembler::TrustedImm32(MIN_SPARSE_ARRAY_INDEX));
jit->m_jit.move(MacroAssembler::TrustedImmPtr(m_contiguousStructure), scratchGPR);
MacroAssembler::Jump done = jit->m_jit.jump();
bigLength.link(&jit->m_jit);
diff --git a/Source/JavaScriptCore/dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h
deleted file mode 100644
index 871854cf8..000000000
--- a/Source/JavaScriptCore/dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGCallCreateDirectArgumentsSlowPathGenerator_h
-#define DFGCallCreateDirectArgumentsSlowPathGenerator_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGCommon.h"
-#include "DFGOperations.h"
-#include "DFGSlowPathGenerator.h"
-#include "DFGSpeculativeJIT.h"
-#include "DirectArguments.h"
-
-namespace JSC { namespace DFG {
-
-// This calls operationCreateDirectArguments but then restores the value of lengthGPR.
-class CallCreateDirectArgumentsSlowPathGenerator : public JumpingSlowPathGenerator<MacroAssembler::JumpList> {
-public:
- CallCreateDirectArgumentsSlowPathGenerator(
- MacroAssembler::JumpList from, SpeculativeJIT* jit, GPRReg resultGPR, Structure* structure,
- GPRReg lengthGPR, unsigned minCapacity)
- : JumpingSlowPathGenerator<MacroAssembler::JumpList>(from, jit)
- , m_resultGPR(resultGPR)
- , m_structure(structure)
- , m_lengthGPR(lengthGPR)
- , m_minCapacity(minCapacity)
- {
- jit->silentSpillAllRegistersImpl(false, m_plans, resultGPR);
- }
-
-protected:
- void generateInternal(SpeculativeJIT* jit) override
- {
- linkFrom(jit);
- for (unsigned i = 0; i < m_plans.size(); ++i)
- jit->silentSpill(m_plans[i]);
- jit->callOperation(
- operationCreateDirectArguments, m_resultGPR, m_structure, m_lengthGPR, m_minCapacity);
- GPRReg canTrample = SpeculativeJIT::pickCanTrample(m_resultGPR);
- for (unsigned i = m_plans.size(); i--;)
- jit->silentFill(m_plans[i], canTrample);
- jit->m_jit.loadPtr(
- MacroAssembler::Address(m_resultGPR, DirectArguments::offsetOfLength()), m_lengthGPR);
- jumpTo(jit);
- }
-
-private:
- GPRReg m_resultGPR;
- Structure* m_structure;
- GPRReg m_lengthGPR;
- unsigned m_minCapacity;
- Vector<SilentRegisterSavePlan, 2> m_plans;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGCallCreateDirectArgumentsSlowPathGenerator_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
index f9b5e7755..e6fcd8c11 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,57 +31,44 @@
#include "CodeBlock.h"
#include "DFGCommon.h"
#include "Interpreter.h"
-#include "JSCInlines.h"
-#include "Options.h"
namespace JSC { namespace DFG {
-bool isSupported()
-{
- return Options::useDFGJIT()
- && MacroAssembler::supportsFloatingPoint();
-}
-
-bool isSupportedForInlining(CodeBlock* codeBlock)
-{
- return codeBlock->ownerExecutable()->isInliningCandidate();
-}
-
+#if ENABLE(DFG_JIT)
bool mightCompileEval(CodeBlock* codeBlock)
{
- return isSupported()
- && codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount();
+ return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount();
}
bool mightCompileProgram(CodeBlock* codeBlock)
{
- return isSupported()
- && codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount();
+ return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount();
}
bool mightCompileFunctionForCall(CodeBlock* codeBlock)
{
- return isSupported()
- && codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount();
+ return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount();
}
bool mightCompileFunctionForConstruct(CodeBlock* codeBlock)
{
- return isSupported()
- && codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount();
+ return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount();
}
bool mightInlineFunctionForCall(CodeBlock* codeBlock)
{
return codeBlock->instructionCount() <= Options::maximumFunctionForCallInlineCandidateInstructionCount()
- && isSupportedForInlining(codeBlock);
+ && !codeBlock->ownerExecutable()->needsActivation()
+ && codeBlock->ownerExecutable()->isInliningCandidate();
}
bool mightInlineFunctionForClosureCall(CodeBlock* codeBlock)
{
return codeBlock->instructionCount() <= Options::maximumFunctionForClosureCallInlineCandidateInstructionCount()
- && isSupportedForInlining(codeBlock);
+ && !codeBlock->ownerExecutable()->needsActivation()
+ && codeBlock->ownerExecutable()->isInliningCandidate();
}
bool mightInlineFunctionForConstruct(CodeBlock* codeBlock)
{
return codeBlock->instructionCount() <= Options::maximumFunctionForConstructInlineCandidateInstructionCount()
- && isSupportedForInlining(codeBlock);
+ && !codeBlock->ownerExecutable()->needsActivation()
+ && codeBlock->ownerExecutable()->isInliningCandidate();
}
inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID, CapabilityLevel result)
@@ -92,13 +79,12 @@ inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID, CapabilityLevel r
CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc)
{
- UNUSED_PARAM(codeBlock); // This function does some bytecode parsing. Ordinarily bytecode parsing requires the owning CodeBlock. It's sort of strange that we don't use it here right now.
-
switch (opcodeID) {
case op_enter:
+ case op_touch_entry:
case op_to_this:
- case op_check_tdz:
case op_create_this:
+ case op_get_callee:
case op_bitand:
case op_bitor:
case op_bitxor:
@@ -117,9 +103,8 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc
case op_debug:
case op_profile_will_call:
case op_profile_did_call:
- case op_profile_type:
- case op_profile_control_flow:
case op_mov:
+ case op_captured_mov:
case op_check_has_instance:
case op_instanceof:
case op_is_undefined:
@@ -127,7 +112,6 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc
case op_is_number:
case op_is_string:
case op_is_object:
- case op_is_object_or_null:
case op_is_function:
case op_not:
case op_less:
@@ -152,6 +136,8 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc
case op_put_by_id_transition_direct_out_of_line:
case op_put_by_id_transition_normal:
case op_put_by_id_transition_normal_out_of_line:
+ case op_init_global_const_nop:
+ case op_init_global_const:
case op_jmp:
case op_jtrue:
case op_jfalse:
@@ -178,35 +164,18 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc
case op_throw_static_error:
case op_call:
case op_construct:
- case op_call_varargs:
- case op_construct_varargs:
- case op_create_direct_arguments:
- case op_create_scoped_arguments:
- case op_create_out_of_band_arguments:
- case op_get_from_arguments:
- case op_put_to_arguments:
+ case op_init_lazy_reg:
+ case op_create_arguments:
+ case op_tear_off_arguments:
+ case op_get_argument_by_val:
+ case op_get_arguments_length:
case op_jneq_ptr:
case op_typeof:
case op_to_number:
- case op_to_string:
case op_switch_imm:
case op_switch_char:
case op_in:
- case op_get_scope:
case op_get_from_scope:
- case op_get_enumerable_length:
- case op_has_generic_property:
- case op_has_structure_property:
- case op_has_indexed_property:
- case op_get_direct_pname:
- case op_get_property_enumerator:
- case op_enumerator_structure_pname:
- case op_enumerator_generic_pname:
- case op_to_index_string:
- case op_new_func:
- case op_new_func_exp:
- case op_create_lexical_environment:
- case op_get_parent_scope:
return CanCompileAndInline;
case op_put_to_scope: {
@@ -220,13 +189,23 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc
case op_resolve_scope: {
// We don't compile 'catch' or 'with', so there's no point in compiling variable resolution within them.
- ResolveType resolveType = ResolveModeAndType(pc[4].u.operand).type();
+ ResolveType resolveType = ResolveModeAndType(pc[3].u.operand).type();
if (resolveType == Dynamic)
return CannotCompile;
return CanCompileAndInline;
}
- case op_new_regexp:
+ case op_call_varargs:
+ if (codeBlock->usesArguments() && pc[4].u.operand == codeBlock->argumentsRegister().offset())
+ return CanInline;
+ return CannotCompile;
+
+ case op_new_regexp:
+ case op_create_activation:
+ case op_tear_off_activation:
+ case op_new_func:
+ case op_new_captured_func:
+ case op_new_func_exp:
case op_switch_string: // Don't inline because we don't want to copy string tables in the concurrent JIT.
return CanCompile;
@@ -265,6 +244,8 @@ CapabilityLevel capabilityLevel(CodeBlock* codeBlock)
return result;
}
+#endif
+
} } // namespace JSC::DFG
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h
index 4010bb291..5bd80c517 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,18 +28,18 @@
#include "CodeBlock.h"
#include "DFGCommon.h"
+#include "DFGNode.h"
#include "Executable.h"
#include "Interpreter.h"
#include "Intrinsic.h"
#include "Options.h"
+#include <wtf/Platform.h>
namespace JSC { namespace DFG {
#if ENABLE(DFG_JIT)
// Fast check functions; if they return true it is still necessary to
// check opcodes.
-bool isSupported();
-bool isSupportedForInlining(CodeBlock*);
bool mightCompileEval(CodeBlock*);
bool mightCompileProgram(CodeBlock*);
bool mightCompileFunctionForCall(CodeBlock*);
@@ -80,72 +80,43 @@ inline CapabilityLevel programCapabilityLevel(CodeBlock* codeBlock)
return capabilityLevel(codeBlock);
}
-inline CapabilityLevel functionCapabilityLevel(bool mightCompile, bool mightInline, CapabilityLevel computedCapabilityLevel)
-{
- if (mightCompile && mightInline)
- return leastUpperBound(CanCompileAndInline, computedCapabilityLevel);
- if (mightCompile && !mightInline)
- return leastUpperBound(CanCompile, computedCapabilityLevel);
- if (!mightCompile)
- return CannotCompile;
- RELEASE_ASSERT_NOT_REACHED();
- return CannotCompile;
-}
-
inline CapabilityLevel functionForCallCapabilityLevel(CodeBlock* codeBlock)
{
- return functionCapabilityLevel(
- mightCompileFunctionForCall(codeBlock),
- mightInlineFunctionForCall(codeBlock),
- capabilityLevel(codeBlock));
+ if (!mightCompileFunctionForCall(codeBlock))
+ return CannotCompile;
+
+ return capabilityLevel(codeBlock);
}
inline CapabilityLevel functionForConstructCapabilityLevel(CodeBlock* codeBlock)
{
- return functionCapabilityLevel(
- mightCompileFunctionForConstruct(codeBlock),
- mightInlineFunctionForConstruct(codeBlock),
- capabilityLevel(codeBlock));
-}
-
-inline CapabilityLevel inlineFunctionForCallCapabilityLevel(CodeBlock* codeBlock)
-{
- if (!mightInlineFunctionForCall(codeBlock))
+ if (!mightCompileFunctionForConstruct(codeBlock))
return CannotCompile;
return capabilityLevel(codeBlock);
}
-inline CapabilityLevel inlineFunctionForClosureCallCapabilityLevel(CodeBlock* codeBlock)
+inline bool canInlineFunctionForCall(CodeBlock* codeBlock)
{
- if (!mightInlineFunctionForClosureCall(codeBlock))
- return CannotCompile;
-
- return capabilityLevel(codeBlock);
+ return mightInlineFunctionForCall(codeBlock) && canInline(capabilityLevel(codeBlock));
}
-inline CapabilityLevel inlineFunctionForConstructCapabilityLevel(CodeBlock* codeBlock)
+inline bool canInlineFunctionForClosureCall(CodeBlock* codeBlock)
{
- if (!mightInlineFunctionForConstruct(codeBlock))
- return CannotCompile;
-
- return capabilityLevel(codeBlock);
+ return mightInlineFunctionForClosureCall(codeBlock) && canInline(capabilityLevel(codeBlock));
}
-inline bool mightInlineFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind)
+inline bool canInlineFunctionForConstruct(CodeBlock* codeBlock)
{
- if (kind == CodeForCall)
- return mightInlineFunctionForCall(codeBlock);
- ASSERT(kind == CodeForConstruct);
- return mightInlineFunctionForConstruct(codeBlock);
+ return mightInlineFunctionForConstruct(codeBlock) && canInline(capabilityLevel(codeBlock));
}
-inline bool mightCompileFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind)
+inline bool mightInlineFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind)
{
if (kind == CodeForCall)
- return mightCompileFunctionForCall(codeBlock);
+ return mightInlineFunctionForCall(codeBlock);
ASSERT(kind == CodeForConstruct);
- return mightCompileFunctionForConstruct(codeBlock);
+ return mightInlineFunctionForConstruct(codeBlock);
}
inline bool mightInlineFunction(CodeBlock* codeBlock)
@@ -153,22 +124,16 @@ inline bool mightInlineFunction(CodeBlock* codeBlock)
return mightInlineFunctionFor(codeBlock, codeBlock->specializationKind());
}
-inline CapabilityLevel inlineFunctionForCapabilityLevel(CodeBlock* codeBlock, CodeSpecializationKind kind, bool isClosureCall)
+inline bool canInlineFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind, bool isClosureCall)
{
if (isClosureCall) {
- if (kind != CodeForCall)
- return CannotCompile;
- return inlineFunctionForClosureCallCapabilityLevel(codeBlock);
+ ASSERT(kind == CodeForCall);
+ return canInlineFunctionForClosureCall(codeBlock);
}
if (kind == CodeForCall)
- return inlineFunctionForCallCapabilityLevel(codeBlock);
+ return canInlineFunctionForCall(codeBlock);
ASSERT(kind == CodeForConstruct);
- return inlineFunctionForConstructCapabilityLevel(codeBlock);
-}
-
-inline bool isSmallEnoughToInlineCodeInto(CodeBlock* codeBlock)
-{
- return codeBlock->instructionCount() <= Options::maximumInliningCallerSize();
+ return canInlineFunctionForConstruct(codeBlock);
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp b/Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp
deleted file mode 100644
index 313094c39..000000000
--- a/Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGCleanUpPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGPhase.h"
-#include "DFGPredictionPropagationPhase.h"
-#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-class CleanUpPhase : public Phase {
-public:
- CleanUpPhase(Graph& graph)
- : Phase(graph, "clean up")
- {
- }
-
- bool run()
- {
- bool changed = false;
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- unsigned sourceIndex = 0;
- unsigned targetIndex = 0;
- while (sourceIndex < block->size()) {
- Node* node = block->at(sourceIndex++);
- bool kill = false;
-
- if (node->op() == Check)
- node->children = node->children.justChecks();
-
- switch (node->op()) {
- case Phantom:
- case Check:
- if (node->children.isEmpty())
- kill = true;
- break;
- default:
- break;
- }
-
- if (kill)
- m_graph.m_allocator.free(node);
- else
- block->at(targetIndex++) = node;
- }
- block->resize(targetIndex);
- }
-
- return changed;
- }
-};
-
-bool performCleanUp(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Clean Up Phase");
- return runPhase<CleanUpPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGCleanUpPhase.h b/Source/JavaScriptCore/dfg/DFGCleanUpPhase.h
deleted file mode 100644
index 3a1bc6916..000000000
--- a/Source/JavaScriptCore/dfg/DFGCleanUpPhase.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGCleanUpPhase_h
-#define DFGCleanUpPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Cleans up unneeded nodes, like empty Checks and Phantoms.
-
-bool performCleanUp(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGCleanUpPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGClobberSet.cpp b/Source/JavaScriptCore/dfg/DFGClobberSet.cpp
index d4630e370..791314172 100644
--- a/Source/JavaScriptCore/dfg/DFGClobberSet.cpp
+++ b/Source/JavaScriptCore/dfg/DFGClobberSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,7 +29,7 @@
#if ENABLE(DFG_JIT)
#include "DFGClobberize.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include <wtf/ListDump.h>
namespace JSC { namespace DFG {
@@ -122,38 +122,37 @@ HashSet<AbstractHeap> ClobberSet::setOf(bool direct) const
void addReads(Graph& graph, Node* node, ClobberSet& readSet)
{
ClobberSetAdd addRead(readSet);
- NoOpClobberize noOp;
- clobberize(graph, node, addRead, noOp, noOp);
+ NoOpClobberize addWrite;
+ clobberize(graph, node, addRead, addWrite);
}
void addWrites(Graph& graph, Node* node, ClobberSet& writeSet)
{
- NoOpClobberize noOp;
+ NoOpClobberize addRead;
ClobberSetAdd addWrite(writeSet);
- clobberize(graph, node, noOp, addWrite, noOp);
+ clobberize(graph, node, addRead, addWrite);
}
void addReadsAndWrites(Graph& graph, Node* node, ClobberSet& readSet, ClobberSet& writeSet)
{
ClobberSetAdd addRead(readSet);
ClobberSetAdd addWrite(writeSet);
- NoOpClobberize noOp;
- clobberize(graph, node, addRead, addWrite, noOp);
+ clobberize(graph, node, addRead, addWrite);
}
bool readsOverlap(Graph& graph, Node* node, ClobberSet& readSet)
{
ClobberSetOverlaps addRead(readSet);
- NoOpClobberize noOp;
- clobberize(graph, node, addRead, noOp, noOp);
+ NoOpClobberize addWrite;
+ clobberize(graph, node, addRead, addWrite);
return addRead.result();
}
bool writesOverlap(Graph& graph, Node* node, ClobberSet& writeSet)
{
- NoOpClobberize noOp;
+ NoOpClobberize addRead;
ClobberSetOverlaps addWrite(writeSet);
- clobberize(graph, node, noOp, addWrite, noOp);
+ clobberize(graph, node, addRead, addWrite);
return addWrite.result();
}
diff --git a/Source/JavaScriptCore/dfg/DFGClobberSet.h b/Source/JavaScriptCore/dfg/DFGClobberSet.h
index d76d3559d..18514f61b 100644
--- a/Source/JavaScriptCore/dfg/DFGClobberSet.h
+++ b/Source/JavaScriptCore/dfg/DFGClobberSet.h
@@ -26,6 +26,8 @@
#ifndef DFGClobberSet_h
#define DFGClobberSet_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAbstractHeap.h"
@@ -80,7 +82,7 @@ public:
{
}
- void operator()(AbstractHeap heap) const
+ void operator()(AbstractHeap heap)
{
m_set.add(heap);
}
@@ -96,7 +98,7 @@ public:
{
}
- void operator()(AbstractHeap heap) const
+ void operator()(AbstractHeap heap)
{
m_result |= m_set.overlaps(heap);
}
@@ -105,7 +107,7 @@ public:
private:
const ClobberSet& m_set;
- mutable bool m_result;
+ bool m_result;
};
void addReads(Graph&, Node*, ClobberSet&);
diff --git a/Source/JavaScriptCore/dfg/DFGClobberize.cpp b/Source/JavaScriptCore/dfg/DFGClobberize.cpp
index a693ba41b..be6185629 100644
--- a/Source/JavaScriptCore/dfg/DFGClobberize.cpp
+++ b/Source/JavaScriptCore/dfg/DFGClobberize.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,53 +28,26 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
bool doesWrites(Graph& graph, Node* node)
{
- NoOpClobberize noOp;
+ NoOpClobberize addRead;
CheckClobberize addWrite;
- clobberize(graph, node, noOp, addWrite, noOp);
+ clobberize(graph, node, addRead, addWrite);
return addWrite.result();
}
-bool accessesOverlap(Graph& graph, Node* node, AbstractHeap heap)
-{
- NoOpClobberize noOp;
- AbstractHeapOverlaps addAccess(heap);
- clobberize(graph, node, addAccess, addAccess, noOp);
- return addAccess.result();
-}
-
bool writesOverlap(Graph& graph, Node* node, AbstractHeap heap)
{
- NoOpClobberize noOp;
+ NoOpClobberize addRead;
AbstractHeapOverlaps addWrite(heap);
- clobberize(graph, node, noOp, addWrite, noOp);
+ clobberize(graph, node, addRead, addWrite);
return addWrite.result();
}
-bool clobbersHeap(Graph& graph, Node* node)
-{
- bool result = false;
- clobberize(
- graph, node, NoOpClobberize(),
- [&] (AbstractHeap heap) {
- switch (heap.kind()) {
- case World:
- case Heap:
- result = true;
- break;
- default:
- break;
- }
- },
- NoOpClobberize());
- return result;
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGClobberize.h b/Source/JavaScriptCore/dfg/DFGClobberize.h
index 12b1cb5fb..6f2a03bf6 100644
--- a/Source/JavaScriptCore/dfg/DFGClobberize.h
+++ b/Source/JavaScriptCore/dfg/DFGClobberize.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ /*
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,28 +26,30 @@
#ifndef DFGClobberize_h
#define DFGClobberize_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAbstractHeap.h"
#include "DFGEdgeUsesStructure.h"
#include "DFGGraph.h"
-#include "DFGHeapLocation.h"
-#include "DFGLazyNode.h"
-#include "DFGPureValue.h"
namespace JSC { namespace DFG {
-template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
-void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
+template<typename ReadFunctor, typename WriteFunctor>
+void clobberizeForAllocation(ReadFunctor& read, WriteFunctor& write)
+{
+ read(GCState);
+ read(BarrierState);
+ write(GCState);
+ write(BarrierState);
+}
+
+template<typename ReadFunctor, typename WriteFunctor>
+void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write)
{
// Some notes:
//
- // - The canonical way of clobbering the world is to read world and write
- // heap. This is because World subsumes Heap and Stack, and Stack can be
- // read by anyone but only written to by explicit stack writing operations.
- // Of course, claiming to also write World is not wrong; it'll just
- // pessimise some important optimizations.
- //
// - We cannot hoist, or sink, anything that has effects. This means that the
// easiest way of indicating that something cannot be hoisted is to claim
// that it side-effects some miscellaneous thing.
@@ -60,9 +62,9 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
// versions of those nodes that backward-exit instead, but I'm not convinced
// of the soundness.
//
- // - Some nodes lie, and claim that they do not read the JSCell_structureID,
- // JSCell_typeInfoFlags, etc. These are nodes that use the structure in a way
- // that does not depend on things that change under structure transitions.
+ // - Some nodes lie, and claim that they do not read the JSCell_structure.
+ // These are nodes that use the structure in a way that does not depend on
+ // things that change under structure transitions.
//
// - It's implicitly understood that OSR exits read the world. This is why we
// generally don't move or eliminate stores. Every node can exit, so the
@@ -77,389 +79,153 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
// can use it for IR dumps. No promises on whether the answers are sound
// prior to type inference - though they probably could be if we did some
// small hacking.
- //
- // - If you do read(Stack) or read(World), then make sure that readTop() in
- // PreciseLocalClobberize is correct.
- // While read() and write() are fairly self-explanatory - they track what sorts of things the
- // node may read or write - the def() functor is more tricky. It tells you the heap locations
- // (not just abstract heaps) that are defined by a node. A heap location comprises an abstract
- // heap, some nodes, and a LocationKind. Briefly, a location defined by a node is a location
- // whose value can be deduced from looking at the node itself. The locations returned must obey
- // the following properties:
- //
- // - If someone wants to CSE a load from the heap, then a HeapLocation object should be
- // sufficient to find a single matching node.
- //
- // - The abstract heap is the only abstract heap that could be clobbered to invalidate any such
- // CSE attempt. I.e. if clobberize() reports that on every path between some node and a node
- // that defines a HeapLocation that it wanted, there were no writes to any abstract heap that
- // overlap the location's heap, then we have a sound match. Effectively, the semantics of
- // write() and def() are intertwined such that for them to be sound they must agree on what
- // is CSEable.
- //
- // read(), write(), and def() for heap locations is enough to do GCSE on effectful things. To
- // keep things simple, this code will also def() pure things. def() must be overloaded to also
- // accept PureValue. This way, a client of clobberize() can implement GCSE entirely using the
- // information that clobberize() passes to write() and def(). Other clients of clobberize() can
- // just ignore def() by using a NoOpClobberize functor.
-
if (edgesUseStructure(graph, node))
- read(JSCell_structureID);
+ read(JSCell_structure);
switch (node->op()) {
case JSConstant:
- case DoubleConstant:
- case Int52Constant:
- def(PureValue(node, node->constant()));
- return;
-
+ case WeakJSConstant:
case Identity:
case Phantom:
- case Check:
- case ExtractOSREntryLocal:
- case CheckStructureImmediate:
- return;
-
+ case Breakpoint:
+ case ProfileWillCall:
+ case ProfileDidCall:
case BitAnd:
case BitOr:
case BitXor:
case BitLShift:
case BitRShift:
case BitURShift:
+ case ValueToInt32:
+ case ArithAdd:
+ case ArithSub:
+ case ArithNegate:
+ case ArithMul:
case ArithIMul:
+ case ArithDiv:
+ case ArithMod:
case ArithAbs:
- case ArithClz32:
case ArithMin:
case ArithMax:
- case ArithPow:
case ArithSqrt:
- case ArithFRound:
case ArithSin:
case ArithCos:
- case ArithLog:
case GetScope:
case SkipScope:
+ case CheckFunction:
case StringCharCodeAt:
case StringFromCharCode:
case CompareEqConstant:
+ case CompareStrictEqConstant:
case CompareStrictEq:
case IsUndefined:
case IsBoolean:
case IsNumber:
case IsString:
- case IsObject:
case LogicalNot:
+ case Int32ToDouble:
+ case ExtractOSREntryLocal:
+ case Int52ToDouble:
+ case Int52ToValue:
case CheckInBounds:
- case DoubleRep:
- case ValueRep:
- case Int52Rep:
- case BooleanToNumber:
- case FiatInt52:
- case MakeRope:
- case ValueToInt32:
- case GetExecutable:
- case BottomValue:
- case TypeOf:
- def(PureValue(node));
- return;
-
- case HasGenericProperty:
- case HasStructureProperty:
- case GetEnumerableLength:
- case GetPropertyEnumerator: {
- read(Heap);
- write(SideState);
- return;
- }
-
- case GetDirectPname: {
- // This reads and writes heap because it can end up calling a generic getByVal
- // if the Structure changed, which could in turn end up calling a getter.
- read(World);
- write(Heap);
- return;
- }
-
- case ToIndexString:
- case GetEnumeratorStructurePname:
- case GetEnumeratorGenericPname: {
- def(PureValue(node));
- return;
- }
-
- case HasIndexedProperty: {
- read(JSObject_butterfly);
- ArrayMode mode = node->arrayMode();
- switch (mode.type()) {
- case Array::Int32: {
- if (mode.isInBounds()) {
- read(Butterfly_publicLength);
- read(IndexedInt32Properties);
- def(HeapLocation(HasIndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), LazyNode(node));
- return;
- }
- read(Heap);
- return;
- }
-
- case Array::Double: {
- if (mode.isInBounds()) {
- read(Butterfly_publicLength);
- read(IndexedDoubleProperties);
- def(HeapLocation(HasIndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), LazyNode(node));
- return;
- }
- read(Heap);
- return;
- }
-
- case Array::Contiguous: {
- if (mode.isInBounds()) {
- read(Butterfly_publicLength);
- read(IndexedContiguousProperties);
- def(HeapLocation(HasIndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), LazyNode(node));
- return;
- }
- read(Heap);
- return;
- }
-
- case Array::ArrayStorage: {
- if (mode.isInBounds()) {
- read(Butterfly_vectorLength);
- read(IndexedArrayStorageProperties);
- return;
- }
- read(Heap);
- return;
- }
-
- default: {
- read(World);
- write(Heap);
- return;
- }
- }
- RELEASE_ASSERT_NOT_REACHED();
- return;
- }
-
- case ArithAdd:
- case ArithSub:
- case ArithNegate:
- case ArithMul:
- case ArithDiv:
- case ArithMod:
- case DoubleAsInt32:
- case UInt32ToNumber:
- def(PureValue(node, node->arithMode()));
- return;
-
- case ArithRound:
- def(PureValue(node, static_cast<uintptr_t>(node->arithRoundingMode())));
- return;
-
- case CheckCell:
- def(PureValue(CheckCell, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->cellOperand()));
- return;
-
- case CheckNotEmpty:
- def(PureValue(CheckNotEmpty, AdjacencyList(AdjacencyList::Fixed, node->child1())));
- return;
-
- case CheckIdent:
- def(PureValue(CheckIdent, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->uidOperand()));
- return;
-
case ConstantStoragePointer:
- def(PureValue(node, node->storagePointer()));
+ case UInt32ToNumber:
+ case DoubleAsInt32:
+ case Check:
return;
-
+
case MovHint:
case ZombieHint:
- case KillStack:
case Upsilon:
case Phi:
+ case Flush:
case PhantomLocal:
case SetArgument:
+ case PhantomArguments:
case Jump:
case Branch:
case Switch:
case Throw:
case ForceOSRExit:
- case CheckBadCell:
case Return:
case Unreachable:
case CheckTierUpInLoop:
case CheckTierUpAtReturn:
case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter:
case LoopHint:
- case Breakpoint:
- case ProfileWillCall:
- case ProfileDidCall:
- case ProfileType:
- case ProfileControlFlow:
- case StoreBarrier:
- case PutHint:
- write(SideState);
- return;
-
case InvalidationPoint:
write(SideState);
- def(HeapLocation(InvalidationPointLoc, Watchpoint_fire), LazyNode(node));
return;
-
- case Flush:
- read(AbstractHeap(Stack, node->local()));
+
+ case VariableWatchpoint:
+ case TypedArrayWatchpoint:
+ read(Watchpoint_fire);
write(SideState);
return;
-
+
case NotifyWrite:
write(Watchpoint_fire);
write(SideState);
return;
- case CreateActivation: {
- SymbolTable* table = node->castOperand<SymbolTable*>();
- if (table->singletonScope()->isStillValid())
- write(Watchpoint_fire);
- read(HeapObjectCount);
- write(HeapObjectCount);
+ case CreateActivation:
+ case CreateArguments:
+ clobberizeForAllocation(read, write);
+ write(SideState);
+ write(Watchpoint_fire);
return;
- }
- case CreateDirectArguments:
- case CreateScopedArguments:
- case CreateClonedArguments:
- read(Stack);
- read(HeapObjectCount);
- write(HeapObjectCount);
- return;
-
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- // DFG backend requires that the locals that this reads are flushed. FTL backend can handle those
- // locals being promoted.
- if (!isFTL(graph.m_plan.mode))
- read(Stack);
-
- // Even though it's phantom, it still has the property that one can't be replaced with another.
- read(HeapObjectCount);
- write(HeapObjectCount);
+ case FunctionReentryWatchpoint:
+ read(Watchpoint_fire);
return;
case ToThis:
case CreateThis:
read(MiscFields);
- read(HeapObjectCount);
- write(HeapObjectCount);
+ clobberizeForAllocation(read, write);
return;
case VarInjectionWatchpoint:
- read(MiscFields);
- def(HeapLocation(VarInjectionWatchpointLoc, MiscFields), LazyNode(node));
- return;
-
- case IsObjectOrNull:
- read(MiscFields);
- def(HeapLocation(IsObjectOrNullLoc, MiscFields, node->child1()), LazyNode(node));
- return;
-
+ case AllocationProfileWatchpoint:
+ case IsObject:
case IsFunction:
+ case TypeOf:
read(MiscFields);
- def(HeapLocation(IsFunctionLoc, MiscFields, node->child1()), LazyNode(node));
return;
case GetById:
case GetByIdFlush:
case PutById:
- case PutByIdFlush:
case PutByIdDirect:
case ArrayPush:
case ArrayPop:
case Call:
case Construct:
- case CallVarargs:
- case CallForwardVarargs:
- case ConstructVarargs:
- case ConstructForwardVarargs:
case ToPrimitive:
case In:
+ case GetMyArgumentsLengthSafe:
+ case GetMyArgumentByValSafe:
case ValueAdd:
read(World);
- write(Heap);
- return;
-
- case GetGetter:
- read(GetterSetter_getter);
- def(HeapLocation(GetterLoc, GetterSetter_getter, node->child1()), LazyNode(node));
- return;
-
- case GetSetter:
- read(GetterSetter_setter);
- def(HeapLocation(SetterLoc, GetterSetter_setter, node->child1()), LazyNode(node));
+ write(World);
return;
case GetCallee:
- read(AbstractHeap(Stack, JSStack::Callee));
- def(HeapLocation(StackLoc, AbstractHeap(Stack, JSStack::Callee)), LazyNode(node));
- return;
-
- case GetArgumentCount:
- read(AbstractHeap(Stack, JSStack::ArgumentCount));
- def(HeapLocation(StackPayloadLoc, AbstractHeap(Stack, JSStack::ArgumentCount)), LazyNode(node));
+ read(AbstractHeap(Variables, JSStack::Callee));
return;
case GetLocal:
- read(AbstractHeap(Stack, node->local()));
- def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node));
+ case GetArgument:
+ read(AbstractHeap(Variables, node->local()));
return;
case SetLocal:
- write(AbstractHeap(Stack, node->local()));
- def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node->child1().node()));
- return;
-
- case GetStack: {
- AbstractHeap heap(Stack, node->stackAccessData()->local);
- read(heap);
- def(HeapLocation(StackLoc, heap), LazyNode(node));
- return;
- }
-
- case PutStack: {
- AbstractHeap heap(Stack, node->stackAccessData()->local);
- write(heap);
- def(HeapLocation(StackLoc, heap), LazyNode(node->child1().node()));
- return;
- }
-
- case LoadVarargs: {
- read(World);
- write(Heap);
- LoadVarargsData* data = node->loadVarargsData();
- write(AbstractHeap(Stack, data->count.offset()));
- for (unsigned i = data->limit; i--;)
- write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i)));
- return;
- }
-
- case ForwardVarargs: {
- // We could be way more precise here.
- read(Stack);
-
- LoadVarargsData* data = node->loadVarargsData();
- write(AbstractHeap(Stack, data->count.offset()));
- for (unsigned i = data->limit; i--;)
- write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i)));
+ write(AbstractHeap(Variables, node->local()));
return;
- }
case GetLocalUnlinked:
- read(AbstractHeap(Stack, node->unlinkedLocal()));
- def(HeapLocation(StackLoc, AbstractHeap(Stack, node->unlinkedLocal())), LazyNode(node));
+ read(AbstractHeap(Variables, node->unlinkedLocal()));
return;
case GetByVal: {
@@ -467,10 +233,10 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
switch (mode.type()) {
case Array::SelectUsingPredictions:
case Array::Unprofiled:
- case Array::SelectUsingArguments:
+ case Array::Undecided:
// Assume the worst since we don't have profiling yet.
read(World);
- write(Heap);
+ write(World);
return;
case Array::ForceExit:
@@ -479,75 +245,61 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
case Array::Generic:
read(World);
- write(Heap);
+ write(World);
return;
case Array::String:
if (mode.isOutOfBounds()) {
read(World);
- write(Heap);
+ write(World);
return;
}
// This appears to read nothing because it's only reading immutable data.
- def(PureValue(node, mode.asWord()));
return;
- case Array::DirectArguments:
- read(DirectArgumentsProperties);
- def(HeapLocation(IndexedPropertyLoc, DirectArgumentsProperties, node->child1(), node->child2()), LazyNode(node));
- return;
-
- case Array::ScopedArguments:
- read(ScopeProperties);
- def(HeapLocation(IndexedPropertyLoc, ScopeProperties, node->child1(), node->child2()), LazyNode(node));
+ case Array::Arguments:
+ read(Arguments_registers);
+ read(Variables);
return;
case Array::Int32:
if (mode.isInBounds()) {
read(Butterfly_publicLength);
+ read(Butterfly_vectorLength);
read(IndexedInt32Properties);
- def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), LazyNode(node));
return;
}
read(World);
- write(Heap);
+ write(World);
return;
case Array::Double:
if (mode.isInBounds()) {
read(Butterfly_publicLength);
+ read(Butterfly_vectorLength);
read(IndexedDoubleProperties);
- def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), LazyNode(node));
return;
}
read(World);
- write(Heap);
+ write(World);
return;
case Array::Contiguous:
if (mode.isInBounds()) {
read(Butterfly_publicLength);
+ read(Butterfly_vectorLength);
read(IndexedContiguousProperties);
- def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), LazyNode(node));
return;
}
read(World);
- write(Heap);
- return;
-
- case Array::Undecided:
- def(PureValue(node));
+ write(World);
return;
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
- if (mode.isInBounds()) {
- read(Butterfly_vectorLength);
- read(IndexedArrayStorageProperties);
- return;
- }
+ // Give up on life for now.
read(World);
- write(Heap);
+ write(World);
return;
case Array::Int8Array:
@@ -560,36 +312,26 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
case Array::Float32Array:
case Array::Float64Array:
read(TypedArrayProperties);
- read(MiscFields);
- def(HeapLocation(IndexedPropertyLoc, TypedArrayProperties, node->child1(), node->child2()), LazyNode(node));
+ read(JSArrayBufferView_vector);
+ read(JSArrayBufferView_length);
return;
}
RELEASE_ASSERT_NOT_REACHED();
return;
}
-
- case GetMyArgumentByVal: {
- read(Stack);
- // FIXME: It would be trivial to have a def here.
- // https://bugs.webkit.org/show_bug.cgi?id=143077
- return;
- }
case PutByValDirect:
case PutByVal:
case PutByValAlias: {
ArrayMode mode = node->arrayMode();
- Node* base = graph.varArgChild(node, 0).node();
- Node* index = graph.varArgChild(node, 1).node();
- Node* value = graph.varArgChild(node, 2).node();
switch (mode.modeForPut().type()) {
case Array::SelectUsingPredictions:
- case Array::SelectUsingArguments:
case Array::Unprofiled:
case Array::Undecided:
+ case Array::String:
// Assume the worst since we don't have profiling yet.
read(World);
- write(Heap);
+ write(World);
return;
case Array::ForceExit:
@@ -598,59 +340,57 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
case Array::Generic:
read(World);
- write(Heap);
+ write(World);
+ return;
+
+ case Array::Arguments:
+ read(Arguments_registers);
+ read(Arguments_numArguments);
+ read(Arguments_slowArguments);
+ write(Variables);
return;
case Array::Int32:
if (node->arrayMode().isOutOfBounds()) {
read(World);
- write(Heap);
+ write(World);
return;
}
read(Butterfly_publicLength);
read(Butterfly_vectorLength);
read(IndexedInt32Properties);
write(IndexedInt32Properties);
- if (node->arrayMode().mayStoreToHole())
- write(Butterfly_publicLength);
- def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, base, index), LazyNode(value));
return;
case Array::Double:
if (node->arrayMode().isOutOfBounds()) {
read(World);
- write(Heap);
+ write(World);
return;
}
read(Butterfly_publicLength);
read(Butterfly_vectorLength);
read(IndexedDoubleProperties);
write(IndexedDoubleProperties);
- if (node->arrayMode().mayStoreToHole())
- write(Butterfly_publicLength);
- def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, base, index), LazyNode(value));
return;
case Array::Contiguous:
if (node->arrayMode().isOutOfBounds()) {
read(World);
- write(Heap);
+ write(World);
return;
}
read(Butterfly_publicLength);
read(Butterfly_vectorLength);
read(IndexedContiguousProperties);
write(IndexedContiguousProperties);
- if (node->arrayMode().mayStoreToHole())
- write(Butterfly_publicLength);
- def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, base, index), LazyNode(value));
return;
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
// Give up on life for now.
read(World);
- write(Heap);
+ write(World);
return;
case Array::Int8Array:
@@ -662,15 +402,9 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
case Array::Uint32Array:
case Array::Float32Array:
case Array::Float64Array:
- read(MiscFields);
+ read(JSArrayBufferView_vector);
+ read(JSArrayBufferView_length);
write(TypedArrayProperties);
- // FIXME: We can't def() anything here because these operations truncate their inputs.
- // https://bugs.webkit.org/show_bug.cgi?id=134737
- return;
- case Array::String:
- case Array::DirectArguments:
- case Array::ScopedArguments:
- DFG_CRASH(graph, node, "impossible array mode for put");
return;
}
RELEASE_ASSERT_NOT_REACHED();
@@ -678,111 +412,66 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
}
case CheckStructure:
- read(JSCell_structureID);
- return;
-
+ case StructureTransitionWatchpoint:
case CheckArray:
- read(JSCell_indexingType);
- read(JSCell_typeInfoType);
- read(JSCell_structureID);
- return;
-
case CheckHasInstance:
- read(JSCell_typeInfoFlags);
- def(HeapLocation(CheckHasInstanceLoc, JSCell_typeInfoFlags, node->child1()), LazyNode(node));
- return;
-
case InstanceOf:
- read(JSCell_structureID);
- def(HeapLocation(InstanceOfLoc, JSCell_structureID, node->child1(), node->child2()), LazyNode(node));
+ read(JSCell_structure);
return;
-
+
+ case CheckExecutable:
+ read(JSFunction_executable);
+ return;
+
case PutStructure:
- write(JSCell_structureID);
- write(JSCell_typeInfoType);
- write(JSCell_typeInfoFlags);
- write(JSCell_indexingType);
+ case PhantomPutStructure:
+ write(JSCell_structure);
return;
case AllocatePropertyStorage:
write(JSObject_butterfly);
- def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));
+ clobberizeForAllocation(read, write);
return;
case ReallocatePropertyStorage:
read(JSObject_butterfly);
write(JSObject_butterfly);
- def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));
+ clobberizeForAllocation(read, write);
return;
case GetButterfly:
read(JSObject_butterfly);
- def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));
return;
case Arrayify:
case ArrayifyToStructure:
- read(JSCell_structureID);
- read(JSCell_indexingType);
+ read(JSCell_structure);
read(JSObject_butterfly);
- write(JSCell_structureID);
- write(JSCell_indexingType);
+ write(JSCell_structure);
write(JSObject_butterfly);
- write(Watchpoint_fire);
+ clobberizeForAllocation(read, write);
return;
case GetIndexedPropertyStorage:
- if (node->arrayMode().type() == Array::String) {
- def(PureValue(node, node->arrayMode().asWord()));
+ if (node->arrayMode().type() == Array::String)
return;
- }
- read(MiscFields);
- def(HeapLocation(IndexedPropertyStorageLoc, MiscFields, node->child1()), LazyNode(node));
+ read(JSArrayBufferView_vector);
return;
case GetTypedArrayByteOffset:
- read(MiscFields);
- def(HeapLocation(TypedArrayByteOffsetLoc, MiscFields, node->child1()), LazyNode(node));
+ read(JSArrayBufferView_vector);
+ read(JSArrayBufferView_mode);
+ read(Butterfly_arrayBuffer);
+ read(ArrayBuffer_data);
return;
case GetByOffset:
- case GetGetterSetterByOffset: {
- unsigned identifierNumber = node->storageAccessData().identifierNumber;
- AbstractHeap heap(NamedProperties, identifierNumber);
- read(heap);
- def(HeapLocation(NamedPropertyLoc, heap, node->child2()), LazyNode(node));
+ read(AbstractHeap(NamedProperties, graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber));
return;
- }
- case MultiGetByOffset: {
- read(JSCell_structureID);
- read(JSObject_butterfly);
- AbstractHeap heap(NamedProperties, node->multiGetByOffsetData().identifierNumber);
- read(heap);
- def(HeapLocation(NamedPropertyLoc, heap, node->child1()), LazyNode(node));
+ case PutByOffset:
+ write(AbstractHeap(NamedProperties, graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber));
return;
- }
-
- case MultiPutByOffset: {
- read(JSCell_structureID);
- read(JSObject_butterfly);
- AbstractHeap heap(NamedProperties, node->multiPutByOffsetData().identifierNumber);
- write(heap);
- if (node->multiPutByOffsetData().writesStructures())
- write(JSCell_structureID);
- if (node->multiPutByOffsetData().reallocatesStorage())
- write(JSObject_butterfly);
- def(HeapLocation(NamedPropertyLoc, heap, node->child1()), LazyNode(node->child2().node()));
- return;
- }
-
- case PutByOffset: {
- unsigned identifierNumber = node->storageAccessData().identifierNumber;
- AbstractHeap heap(NamedProperties, identifierNumber);
- write(heap);
- def(HeapLocation(NamedPropertyLoc, heap, node->child2()), LazyNode(node->child3().node()));
- return;
- }
case GetArrayLength: {
ArrayMode mode = node->arrayMode();
@@ -793,184 +482,77 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
read(Butterfly_publicLength);
- def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node->child1()), LazyNode(node));
return;
case Array::String:
- def(PureValue(node, mode.asWord()));
return;
- case Array::DirectArguments:
- case Array::ScopedArguments:
- read(MiscFields);
- def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), LazyNode(node));
+ case Array::Arguments:
+ read(Arguments_overrideLength);
+ read(Arguments_numArguments);
return;
default:
- ASSERT(mode.typedArrayType() != NotTypedArray);
- read(MiscFields);
- def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), LazyNode(node));
+ read(JSArrayBufferView_length);
return;
}
}
- case GetClosureVar:
- read(AbstractHeap(ScopeProperties, node->scopeOffset().offset()));
- def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node));
+ case GetMyScope:
+ read(AbstractHeap(Variables, JSStack::ScopeChain));
return;
- case PutClosureVar:
- write(AbstractHeap(ScopeProperties, node->scopeOffset().offset()));
- def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node->child2().node()));
+ case SkipTopScope:
+ read(AbstractHeap(Variables, graph.activationRegister()));
return;
- case GetFromArguments: {
- AbstractHeap heap(DirectArgumentsProperties, node->capturedArgumentsOffset().offset());
- read(heap);
- def(HeapLocation(DirectArgumentsLoc, heap, node->child1()), LazyNode(node));
+ case GetClosureRegisters:
+ read(JSVariableObject_registers);
return;
- }
- case PutToArguments: {
- AbstractHeap heap(DirectArgumentsProperties, node->capturedArgumentsOffset().offset());
- write(heap);
- def(HeapLocation(DirectArgumentsLoc, heap, node->child1()), LazyNode(node->child2().node()));
+ case GetClosureVar:
+ read(AbstractHeap(Variables, node->varNumber()));
+ return;
+
+ case PutClosureVar:
+ write(AbstractHeap(Variables, node->varNumber()));
return;
- }
case GetGlobalVar:
- read(AbstractHeap(Absolute, node->variablePointer()));
- def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->variablePointer())), LazyNode(node));
+ read(AbstractHeap(Absolute, node->registerPointer()));
return;
case PutGlobalVar:
- write(AbstractHeap(Absolute, node->variablePointer()));
- def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->variablePointer())), LazyNode(node->child2().node()));
+ write(AbstractHeap(Absolute, node->registerPointer()));
return;
+ case NewObject:
+ case NewArray:
case NewArrayWithSize:
- case NewTypedArray:
- read(HeapObjectCount);
- write(HeapObjectCount);
+ case NewArrayBuffer:
+ case NewRegexp:
+ case NewStringObject:
+ case MakeRope:
+ case NewFunctionNoCheck:
+ case NewFunction:
+ case NewFunctionExpression:
+ clobberizeForAllocation(read, write);
return;
-
- case NewArray: {
- read(HeapObjectCount);
- write(HeapObjectCount);
-
- unsigned numElements = node->numChildren();
-
- def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node),
- LazyNode(graph.freeze(jsNumber(numElements))));
-
- if (!numElements)
+
+ case NewTypedArray:
+ clobberizeForAllocation(read, write);
+ switch (node->child1().useKind()) {
+ case Int32Use:
return;
-
- AbstractHeap heap;
- switch (node->indexingType()) {
- case ALL_DOUBLE_INDEXING_TYPES:
- heap = IndexedDoubleProperties;
- break;
-
- case ALL_INT32_INDEXING_TYPES:
- heap = IndexedInt32Properties;
- break;
-
- case ALL_CONTIGUOUS_INDEXING_TYPES:
- heap = IndexedContiguousProperties;
- break;
-
- default:
+ case UntypedUse:
+ read(World);
+ write(World);
return;
- }
-
- if (numElements < graph.m_uint32ValuesInUse.size()) {
- for (unsigned operandIdx = 0; operandIdx < numElements; ++operandIdx) {
- Edge use = graph.m_varArgChildren[node->firstChild() + operandIdx];
- def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(operandIdx)))),
- LazyNode(use.node()));
- }
- } else {
- for (uint32_t operandIdx : graph.m_uint32ValuesInUse) {
- if (operandIdx >= numElements)
- continue;
- Edge use = graph.m_varArgChildren[node->firstChild() + operandIdx];
- // operandIdx comes from graph.m_uint32ValuesInUse and thus is guaranteed to be already frozen
- def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(operandIdx)))),
- LazyNode(use.node()));
- }
- }
- return;
- }
-
- case NewArrayBuffer: {
- read(HeapObjectCount);
- write(HeapObjectCount);
-
- unsigned numElements = node->numConstants();
- def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node),
- LazyNode(graph.freeze(jsNumber(numElements))));
-
- AbstractHeap heap;
- NodeType op = JSConstant;
- switch (node->indexingType()) {
- case ALL_DOUBLE_INDEXING_TYPES:
- heap = IndexedDoubleProperties;
- op = DoubleConstant;
- break;
-
- case ALL_INT32_INDEXING_TYPES:
- heap = IndexedInt32Properties;
- break;
-
- case ALL_CONTIGUOUS_INDEXING_TYPES:
- heap = IndexedContiguousProperties;
- break;
-
default:
+ RELEASE_ASSERT_NOT_REACHED();
return;
}
-
- JSValue* data = graph.m_codeBlock->constantBuffer(node->startConstant());
- if (numElements < graph.m_uint32ValuesInUse.size()) {
- for (unsigned index = 0; index < numElements; ++index) {
- def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(index)))),
- LazyNode(graph.freeze(data[index]), op));
- }
- } else {
- Vector<uint32_t> possibleIndices;
- for (uint32_t index : graph.m_uint32ValuesInUse) {
- if (index >= numElements)
- continue;
- possibleIndices.append(index);
- }
- for (uint32_t index : possibleIndices) {
- def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(index)))),
- LazyNode(graph.freeze(data[index]), op));
- }
- }
- return;
- }
-
- case NewObject:
- case NewRegexp:
- case NewStringObject:
- case PhantomNewObject:
- case MaterializeNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case MaterializeCreateActivation:
- read(HeapObjectCount);
- write(HeapObjectCount);
- return;
- case NewFunction:
- if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid())
- write(Watchpoint_fire);
- read(HeapObjectCount);
- write(HeapObjectCount);
- return;
-
case RegExpExec:
case RegExpTest:
read(RegExpState);
@@ -980,10 +562,9 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
case StringCharAt:
if (node->arrayMode().isOutOfBounds()) {
read(World);
- write(Heap);
+ write(World);
return;
}
- def(PureValue(node));
return;
case CompareEq:
@@ -991,38 +572,53 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
case CompareLessEq:
case CompareGreater:
case CompareGreaterEq:
- if (!node->isBinaryUseKind(UntypedUse)) {
- def(PureValue(node));
+ if (!node->isBinaryUseKind(UntypedUse))
return;
- }
read(World);
- write(Heap);
+ write(World);
return;
case ToString:
- case CallStringConstructor:
switch (node->child1().useKind()) {
case StringObjectUse:
case StringOrStringObjectUse:
- // These don't def a pure value, unfortunately. I'll avoid load-eliminating these for
- // now.
return;
case CellUse:
case UntypedUse:
read(World);
- write(Heap);
+ write(World);
return;
default:
RELEASE_ASSERT_NOT_REACHED();
return;
}
+
+ case TearOffActivation:
+ write(JSVariableObject_registers);
+ return;
+ case TearOffArguments:
+ write(Arguments_registers);
+ return;
+
+ case GetMyArgumentsLength:
+ read(AbstractHeap(Variables, graph.argumentsRegisterFor(node->codeOrigin)));
+ read(AbstractHeap(Variables, JSStack::ArgumentCount));
+ return;
+
+ case GetMyArgumentByVal:
+ read(Variables);
+ return;
+
+ case CheckArgumentsNotCreated:
+ read(AbstractHeap(Variables, graph.argumentsRegisterFor(node->codeOrigin)));
+ return;
+
case ThrowReferenceError:
write(SideState);
- read(HeapObjectCount);
- write(HeapObjectCount);
+ clobberizeForAllocation(read, write);
return;
case CountExecution:
@@ -1030,20 +626,26 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
read(InternalState);
write(InternalState);
return;
+
+ case StoreBarrier:
+ case ConditionalStoreBarrier:
+ case StoreBarrierWithNullCheck:
+ read(BarrierState);
+ write(BarrierState);
+ return;
case LastNodeType:
RELEASE_ASSERT_NOT_REACHED();
return;
}
- DFG_CRASH(graph, node, toCString("Unrecognized node type: ", Graph::opName(node->op())).data());
+ RELEASE_ASSERT_NOT_REACHED();
}
class NoOpClobberize {
public:
NoOpClobberize() { }
- template<typename... T>
- void operator()(T...) const { }
+ void operator()(AbstractHeap) { }
};
class CheckClobberize {
@@ -1053,13 +655,12 @@ public:
{
}
- template<typename... T>
- void operator()(T...) const { m_result = true; }
+ void operator()(AbstractHeap) { m_result = true; }
bool result() const { return m_result; }
private:
- mutable bool m_result;
+ bool m_result;
};
bool doesWrites(Graph&, Node*);
@@ -1072,7 +673,7 @@ public:
{
}
- void operator()(AbstractHeap otherHeap) const
+ void operator()(AbstractHeap otherHeap)
{
if (m_result)
return;
@@ -1083,80 +684,11 @@ public:
private:
AbstractHeap m_heap;
- mutable bool m_result;
+ bool m_result;
};
-bool accessesOverlap(Graph&, Node*, AbstractHeap);
bool writesOverlap(Graph&, Node*, AbstractHeap);
-bool clobbersHeap(Graph&, Node*);
-
-// We would have used bind() for these, but because of the overlaoding that we are doing,
-// it's quite a bit of clearer to just write this out the traditional way.
-
-template<typename T>
-class ReadMethodClobberize {
-public:
- ReadMethodClobberize(T& value)
- : m_value(value)
- {
- }
-
- void operator()(AbstractHeap heap) const
- {
- m_value.read(heap);
- }
-private:
- T& m_value;
-};
-
-template<typename T>
-class WriteMethodClobberize {
-public:
- WriteMethodClobberize(T& value)
- : m_value(value)
- {
- }
-
- void operator()(AbstractHeap heap) const
- {
- m_value.write(heap);
- }
-private:
- T& m_value;
-};
-
-template<typename T>
-class DefMethodClobberize {
-public:
- DefMethodClobberize(T& value)
- : m_value(value)
- {
- }
-
- void operator()(PureValue value) const
- {
- m_value.def(value);
- }
-
- void operator()(HeapLocation location, LazyNode node) const
- {
- m_value.def(location, node);
- }
-
-private:
- T& m_value;
-};
-
-template<typename Adaptor>
-void clobberize(Graph& graph, Node* node, Adaptor& adaptor)
-{
- ReadMethodClobberize<Adaptor> read(adaptor);
- WriteMethodClobberize<Adaptor> write(adaptor);
- DefMethodClobberize<Adaptor> def(adaptor);
- clobberize(graph, node, read, write, def);
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp b/Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp
deleted file mode 100644
index ccf943358..000000000
--- a/Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGCombinedLiveness.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGAvailabilityMap.h"
-#include "DFGBlockMapInlines.h"
-#include "FullBytecodeLiveness.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-HashSet<Node*> liveNodesAtHead(Graph& graph, BasicBlock* block)
-{
- HashSet<Node*> seen;
- for (Node* node : block->ssa->liveAtHead)
- seen.add(node);
-
- AvailabilityMap& availabilityMap = block->ssa->availabilityAtHead;
- graph.forAllLocalsLiveInBytecode(
- block->firstOrigin().forExit,
- [&] (VirtualRegister reg) {
- availabilityMap.closeStartingWithLocal(
- reg,
- [&] (Node* node) -> bool {
- return seen.contains(node);
- },
- [&] (Node* node) -> bool {
- return seen.add(node).isNewEntry;
- });
- });
-
- return seen;
-}
-
-CombinedLiveness::CombinedLiveness(Graph& graph)
- : liveAtHead(graph)
- , liveAtTail(graph)
-{
- // First compute the liveAtHead for each block.
- for (BasicBlock* block : graph.blocksInNaturalOrder())
- liveAtHead[block] = liveNodesAtHead(graph, block);
-
- // Now compute the liveAtTail by unifying the liveAtHead of the successors.
- for (BasicBlock* block : graph.blocksInNaturalOrder()) {
- for (BasicBlock* successor : block->successors()) {
- for (Node* node : liveAtHead[successor])
- liveAtTail[block].add(node);
- }
- }
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGCombinedLiveness.h b/Source/JavaScriptCore/dfg/DFGCombinedLiveness.h
deleted file mode 100644
index ff761cf72..000000000
--- a/Source/JavaScriptCore/dfg/DFGCombinedLiveness.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGCombinedLiveness_h
-#define DFGCombinedLiveness_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBlockMap.h"
-#include "DFGGraph.h"
-
-namespace JSC { namespace DFG {
-
-// Returns the set of nodes live at tail, both due to due DFG and due to bytecode (i.e. OSR exit).
-HashSet<Node*> liveNodesAtHead(Graph&, BasicBlock*);
-
-struct CombinedLiveness {
- CombinedLiveness() { }
-
- CombinedLiveness(Graph&);
-
- BlockMap<HashSet<Node*>> liveAtHead;
- BlockMap<HashSet<Node*>> liveAtTail;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGCombinedLiveness_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGCommon.cpp b/Source/JavaScriptCore/dfg/DFGCommon.cpp
index cd2a12c73..adb08b595 100644
--- a/Source/JavaScriptCore/dfg/DFGCommon.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCommon.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,38 +26,9 @@
#include "config.h"
#include "DFGCommon.h"
-#include "DFGNode.h"
-#include "JSCInlines.h"
-#include <wtf/PrintStream.h>
-
#if ENABLE(DFG_JIT)
-namespace JSC { namespace DFG {
-
-static StaticLock crashLock;
-
-void startCrashing()
-{
- crashLock.lock();
-}
-
-bool isCrashing()
-{
- return crashLock.isLocked();
-}
-
-bool stringLessThan(StringImpl& a, StringImpl& b)
-{
- unsigned minLength = std::min(a.length(), b.length());
- for (unsigned i = 0; i < minLength; ++i) {
- if (a[i] == b[i])
- continue;
- return a[i] < b[i];
- }
- return a.length() < b.length();
-}
-
-} } // namespace JSC::DFG
+#include "DFGNode.h"
namespace WTF {
@@ -138,28 +109,3 @@ void printInternal(PrintStream& out, ProofStatus status)
#endif // ENABLE(DFG_JIT)
-namespace WTF {
-
-using namespace JSC::DFG;
-
-void printInternal(PrintStream& out, CapabilityLevel capabilityLevel)
-{
- switch (capabilityLevel) {
- case CannotCompile:
- out.print("CannotCompile");
- return;
- case CanCompile:
- out.print("CanCompile");
- return;
- case CanCompileAndInline:
- out.print("CanCompileAndInline");
- return;
- case CapabilityLevelNotSet:
- out.print("CapabilityLevelNotSet");
- return;
- }
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace WTF
-
diff --git a/Source/JavaScriptCore/dfg/DFGCommon.h b/Source/JavaScriptCore/dfg/DFGCommon.h
index df8c8ef0d..7b4b1db5f 100644
--- a/Source/JavaScriptCore/dfg/DFGCommon.h
+++ b/Source/JavaScriptCore/dfg/DFGCommon.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,10 +26,11 @@
#ifndef DFGCommon_h
#define DFGCommon_h
-#include "DFGCompilationMode.h"
+#include <wtf/Platform.h>
#if ENABLE(DFG_JIT)
+#include "CodeOrigin.h"
#include "Options.h"
#include "VirtualRegister.h"
@@ -62,21 +63,14 @@ enum RefNodeMode {
DontRefNode
};
-enum SwitchKind {
- SwitchImm,
- SwitchChar,
- SwitchString,
- SwitchCell
-};
-
-inline bool verboseCompilationEnabled(CompilationMode mode = DFGMode)
+inline bool verboseCompilationEnabled()
{
- return Options::verboseCompilation() || Options::dumpGraphAtEachPhase() || (isFTL(mode) && Options::verboseFTLCompilation());
+ return Options::verboseCompilation() || Options::dumpGraphAtEachPhase();
}
-inline bool logCompilationChanges(CompilationMode mode = DFGMode)
+inline bool logCompilationChanges()
{
- return verboseCompilationEnabled(mode) || Options::logCompilationChanges();
+ return verboseCompilationEnabled() || Options::logCompilationChanges();
}
inline bool shouldDumpGraphAtEachPhase()
@@ -93,6 +87,15 @@ inline bool validationEnabled()
#endif
}
+inline bool enableConcurrentJIT()
+{
+#if ENABLE(CONCURRENT_JIT)
+ return Options::enableConcurrentJIT() && Options::numberOfCompilerThreads();
+#else
+ return false;
+#endif
+}
+
inline bool enableInt52()
{
#if USE(JSVALUE64)
@@ -102,59 +105,9 @@ inline bool enableInt52()
#endif
}
-enum NoResultTag { NoResult };
+enum SpillRegistersMode { NeedToSpill, DontSpill };
-// The prediction propagator effectively does four passes, with the last pass
-// being done by the separate FixuPhase.
-enum PredictionPass {
- // We're converging in a straght-forward forward flow fixpoint. This is the
- // most conventional part of the propagator - it makes only monotonic decisions
- // based on value profiles and rare case profiles. It ignores baseline JIT rare
- // case profiles. The goal here is to develop a good guess of which variables
- // are likely to be purely numerical, which generally doesn't require knowing
- // the rare case profiles.
- PrimaryPass,
-
- // At this point we know what is numerical and what isn't. Non-numerical inputs
- // to arithmetic operations will not have useful information in the Baseline JIT
- // rare case profiles because Baseline may take slow path on non-numerical
- // inputs even if the DFG could handle the input on the fast path. Boolean
- // inputs are the most obvious example. This pass of prediction propagation will
- // use Baseline rare case profiles for purely numerical operations and it will
- // ignore them for everything else. The point of this pass is to develop a good
- // guess of which variables are likely to be doubles.
- //
- // This pass is intentionally weird and goes against what is considered good
- // form when writing a static analysis: a new data flow of booleans will cause
- // us to ignore rare case profiles except that by then, we will have already
- // propagated double types based on our prior assumption that we shouldn't
- // ignore rare cases. This probably won't happen because the PrimaryPass is
- // almost certainly going to establish what is and isn't numerical. But it's
- // conceivable that during this pass we will discover a new boolean data flow.
- // This ends up being sound because the prediction propagator could literally
- // make any guesses it wants and still be sound (worst case, we OSR exit more
- // often or use too general of types are run a bit slower). This will converge
- // because we force monotonicity on the types of nodes and variables. So, the
- // worst thing that can happen is that we violate basic laws of theoretical
- // decency.
- RareCasePass,
-
- // At this point we know what is numerical and what isn't, and we also know what
- // is a double and what isn't. So, we start forcing variables to be double.
- // Doing so may have a cascading effect so this is a fixpoint. It's monotonic
- // in the sense that once a variable is forced double, it cannot be forced in
- // the other direction.
- DoubleVotingPass,
-
- // This pass occurs once we have converged. At this point we are just installing
- // type checks based on the conclusions we have already reached. It's important
- // for this pass to reach the same conclusions that DoubleVotingPass reached.
- FixupPass
-};
-
-enum StructureRegistrationState { HaveNotStartedRegistering, AllStructuresAreRegistered };
-
-enum StructureRegistrationResult { StructureRegisteredNormally, StructureRegisteredAndWatched };
+enum NoResultTag { NoResult };
enum OptimizationFixpointState { BeforeFixpoint, FixpointNotConverged, FixpointConverged };
@@ -192,10 +145,12 @@ enum GraphForm {
// expect to be live at the head, and which locals they make available at the
// tail. ThreadedCPS form also implies that:
//
- // - GetLocals and SetLocals are not redundant within a basic block.
+ // - GetLocals and SetLocals to uncaptured variables are not redundant within
+ // a basic block.
//
// - All GetLocals and Flushes are linked directly to the last access point
- // of the variable, which must not be another GetLocal.
+ // of the variable, which must not be another GetLocal if the variable is
+ // uncaptured.
//
// - Phantom(Phi) is not legal, but PhantomLocal is.
//
@@ -253,11 +208,6 @@ inline KillStatus killStatusForDoesKill(bool doesKill)
return doesKill ? DoesKill : DoesNotKill;
}
-enum class PlanStage {
- Initial,
- AfterFixup
-};
-
template<typename T, typename U>
bool checkAndSet(T& left, U right)
{
@@ -267,40 +217,6 @@ bool checkAndSet(T& left, U right)
return true;
}
-// If possible, this will acquire a lock to make sure that if multiple threads
-// start crashing at the same time, you get coherent dump output. Use this only
-// when you're forcing a crash with diagnostics.
-void startCrashing();
-
-JS_EXPORT_PRIVATE bool isCrashing();
-
-struct NodeAndIndex {
- NodeAndIndex()
- : node(nullptr)
- , index(UINT_MAX)
- {
- }
-
- NodeAndIndex(Node* node, unsigned index)
- : node(node)
- , index(index)
- {
- ASSERT(!node == (index == UINT_MAX));
- }
-
- bool operator!() const
- {
- return !node;
- }
-
- Node* node;
- unsigned index;
-};
-
-// A less-than operator for strings that is useful for generating string switches. Sorts by <
-// relation on characters. Ensures that if a is a prefix of b, then a < b.
-bool stringLessThan(StringImpl& a, StringImpl& b);
-
} } // namespace JSC::DFG
namespace WTF {
@@ -319,12 +235,7 @@ namespace JSC { namespace DFG {
// Put things here that must be defined even if ENABLE(DFG_JIT) is false.
-enum CapabilityLevel {
- CannotCompile,
- CanCompile,
- CanCompileAndInline,
- CapabilityLevelNotSet
-};
+enum CapabilityLevel { CannotCompile, CanInline, CanCompile, CanCompileAndInline, CapabilityLevelNotSet };
inline bool canCompile(CapabilityLevel level)
{
@@ -340,6 +251,7 @@ inline bool canCompile(CapabilityLevel level)
inline bool canInline(CapabilityLevel level)
{
switch (level) {
+ case CanInline:
case CanCompileAndInline:
return true;
default:
@@ -352,6 +264,14 @@ inline CapabilityLevel leastUpperBound(CapabilityLevel a, CapabilityLevel b)
switch (a) {
case CannotCompile:
return CannotCompile;
+ case CanInline:
+ switch (b) {
+ case CanInline:
+ case CanCompileAndInline:
+ return CanInline;
+ default:
+ return CannotCompile;
+ }
case CanCompile:
switch (b) {
case CanCompile:
@@ -371,23 +291,16 @@ inline CapabilityLevel leastUpperBound(CapabilityLevel a, CapabilityLevel b)
}
// Unconditionally disable DFG disassembly support if the DFG is not compiled in.
-inline bool shouldShowDisassembly(CompilationMode mode = DFGMode)
+inline bool shouldShowDisassembly()
{
#if ENABLE(DFG_JIT)
- return Options::showDisassembly() || Options::showDFGDisassembly() || (isFTL(mode) && Options::showFTLDisassembly());
+ return Options::showDisassembly() || Options::showDFGDisassembly();
#else
- UNUSED_PARAM(mode);
return false;
#endif
}
} } // namespace JSC::DFG
-namespace WTF {
-
-void printInternal(PrintStream&, JSC::DFG::CapabilityLevel);
-
-} // namespace WTF
-
#endif // DFGCommon_h
diff --git a/Source/JavaScriptCore/dfg/DFGCommonData.cpp b/Source/JavaScriptCore/dfg/DFGCommonData.cpp
index 881d19f8c..7b7ed0e40 100644
--- a/Source/JavaScriptCore/dfg/DFGCommonData.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCommonData.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,8 +31,7 @@
#include "CodeBlock.h"
#include "DFGNode.h"
#include "DFGPlan.h"
-#include "JSCInlines.h"
-#include "TrackedReferences.h"
+#include "Operations.h"
#include "VM.h"
namespace JSC { namespace DFG {
@@ -41,9 +40,9 @@ void CommonData::notifyCompilingStructureTransition(Plan& plan, CodeBlock* codeB
{
plan.transitions.addLazily(
codeBlock,
- node->origin.semantic.codeOriginOwner(),
- node->transition()->previous,
- node->transition()->next);
+ node->codeOrigin.codeOriginOwner(),
+ node->structureTransitionData().previousStructure,
+ node->structureTransitionData().newStructure);
}
unsigned CommonData::addCodeOrigin(CodeOrigin codeOrigin)
@@ -73,27 +72,6 @@ bool CommonData::invalidate()
return true;
}
-void CommonData::validateReferences(const TrackedReferences& trackedReferences)
-{
- if (InlineCallFrameSet* set = inlineCallFrames.get()) {
- for (InlineCallFrame* inlineCallFrame : *set) {
- for (ValueRecovery& recovery : inlineCallFrame->arguments) {
- if (recovery.isConstant())
- trackedReferences.check(recovery.constant());
- }
-
- if (ScriptExecutable* executable = inlineCallFrame->executable.get())
- trackedReferences.check(executable);
-
- if (inlineCallFrame->calleeRecovery.isConstant())
- trackedReferences.check(inlineCallFrame->calleeRecovery.constant());
- }
- }
-
- for (AdaptiveStructureWatchpoint* watchpoint : adaptiveStructureWatchpoints)
- watchpoint->key().validateReferences(trackedReferences);
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGCommonData.h b/Source/JavaScriptCore/dfg/DFGCommonData.h
index 2a2d0a2d0..17c5cce11 100644
--- a/Source/JavaScriptCore/dfg/DFGCommonData.h
+++ b/Source/JavaScriptCore/dfg/DFGCommonData.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,24 +26,23 @@
#ifndef DFGCommonData_h
#define DFGCommonData_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "CodeBlockJettisoningWatchpoint.h"
-#include "DFGAdaptiveInferredPropertyValueWatchpoint.h"
-#include "DFGAdaptiveStructureWatchpoint.h"
#include "DFGJumpReplacement.h"
#include "InlineCallFrameSet.h"
#include "JSCell.h"
+#include "ProfiledCodeBlockJettisoningWatchpoint.h"
#include "ProfilerCompilation.h"
#include "SymbolTable.h"
-#include <wtf/Bag.h>
#include <wtf/Noncopyable.h>
namespace JSC {
class CodeBlock;
class Identifier;
-class TrackedReferences;
namespace DFG {
@@ -74,12 +73,13 @@ class CommonData {
public:
CommonData()
: isStillValid(true)
+ , machineCaptureStart(std::numeric_limits<int>::max())
, frameRegisterCount(std::numeric_limits<unsigned>::max())
, requiredRegisterCountForExit(std::numeric_limits<unsigned>::max())
{ }
void notifyCompilingStructureTransition(Plan&, CodeBlock*, Node*);
- unsigned addCodeOrigin(CodeOrigin);
+ unsigned addCodeOrigin(CodeOrigin codeOrigin);
void shrinkToFit();
@@ -89,19 +89,15 @@ public:
{
return std::max(frameRegisterCount, requiredRegisterCountForExit);
}
-
- void validateReferences(const TrackedReferences&);
- RefPtr<InlineCallFrameSet> inlineCallFrames;
+ OwnPtr<InlineCallFrameSet> inlineCallFrames;
Vector<CodeOrigin, 0, UnsafeVectorOverflow> codeOrigins;
Vector<Identifier> dfgIdentifiers;
Vector<WeakReferenceTransition> transitions;
Vector<WriteBarrier<JSCell>> weakReferences;
- Vector<WriteBarrier<Structure>> weakStructureReferences;
- Bag<CodeBlockJettisoningWatchpoint> watchpoints;
- Bag<AdaptiveStructureWatchpoint> adaptiveStructureWatchpoints;
- Bag<AdaptiveInferredPropertyValueWatchpoint> adaptiveInferredPropertyValueWatchpoints;
+ SegmentedVector<CodeBlockJettisoningWatchpoint, 1, 0> watchpoints;
+ SegmentedVector<ProfiledCodeBlockJettisoningWatchpoint, 1, 0> profiledWatchpoints;
Vector<JumpReplacement> jumpReplacements;
RefPtr<Profiler::Compilation> compilation;
@@ -109,9 +105,8 @@ public:
bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
bool isStillValid;
-#if USE(JSVALUE32_64)
- std::unique_ptr<Bag<double>> doubleConstants;
-#endif
+ int machineCaptureStart;
+ std::unique_ptr<SlowArgument[]> slowArguments;
unsigned frameRegisterCount;
unsigned requiredRegisterCountForExit;
diff --git a/Source/JavaScriptCore/dfg/DFGCompilationKey.cpp b/Source/JavaScriptCore/dfg/DFGCompilationKey.cpp
index 20ad082cc..d31ac9e2d 100644
--- a/Source/JavaScriptCore/dfg/DFGCompilationKey.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCompilationKey.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,11 +26,9 @@
#include "config.h"
#include "DFGCompilationKey.h"
-#if ENABLE(DFG_JIT)
-
#include "CodeBlock.h"
-#include "CodeBlockSet.h"
-#include "JSCInlines.h"
+
+#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGCompilationKey.h b/Source/JavaScriptCore/dfg/DFGCompilationKey.h
index ff562a048..a866acdf1 100644
--- a/Source/JavaScriptCore/dfg/DFGCompilationKey.h
+++ b/Source/JavaScriptCore/dfg/DFGCompilationKey.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +32,6 @@
namespace JSC {
class CodeBlock;
-class CodeBlockSet;
namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGCompilationMode.cpp b/Source/JavaScriptCore/dfg/DFGCompilationMode.cpp
index 20de99603..99d95331c 100644
--- a/Source/JavaScriptCore/dfg/DFGCompilationMode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCompilationMode.cpp
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace WTF {
using namespace JSC::DFG;
diff --git a/Source/JavaScriptCore/dfg/DFGCompilationMode.h b/Source/JavaScriptCore/dfg/DFGCompilationMode.h
index 2d6e49ad5..1035f60bc 100644
--- a/Source/JavaScriptCore/dfg/DFGCompilationMode.h
+++ b/Source/JavaScriptCore/dfg/DFGCompilationMode.h
@@ -37,17 +37,6 @@ enum CompilationMode {
FTLForOSREntryMode
};
-inline bool isFTL(CompilationMode mode)
-{
- switch (mode) {
- case FTLMode:
- case FTLForOSREntryMode:
- return true;
- default:
- return false;
- }
-}
-
} } // namespace JSC::DFG
namespace WTF {
diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
index 0c6467230..f58761160 100644
--- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,14 +29,13 @@
#if ENABLE(DFG_JIT)
#include "DFGAbstractInterpreterInlines.h"
-#include "DFGArgumentsUtilities.h"
#include "DFGBasicBlock.h"
#include "DFGGraph.h"
#include "DFGInPlaceAbstractState.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "GetByIdStatus.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "PutByIdStatus.h"
namespace JSC { namespace DFG {
@@ -63,16 +62,6 @@ public:
changed |= foldConstants(block);
}
- if (changed && m_graph.m_form == SSA) {
- // It's now possible that we have Upsilons pointed at JSConstants. Fix that.
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
- fixUpsilons(block);
- }
- }
-
return changed;
}
@@ -87,17 +76,19 @@ private:
Node* node = block->at(indexInBlock);
- bool alreadyHandled = false;
bool eliminated = false;
switch (node->op()) {
- case BooleanToNumber: {
- if (node->child1().useKind() == UntypedUse
- && !m_interpreter.needsTypeCheck(node->child1(), SpecBoolean))
- node->child1().setUseKind(BooleanUse);
+ case CheckArgumentsNotCreated: {
+ if (!isEmptySpeculation(
+ m_state.variables().operand(
+ m_graph.argumentsRegisterFor(node->codeOrigin)).m_type))
+ break;
+ node->convertToPhantom();
+ eliminated = true;
break;
}
-
+
case CheckStructure:
case ArrayifyToStructure: {
AbstractValue& value = m_state.forNode(node->child1());
@@ -106,71 +97,27 @@ private:
set = node->structure();
else
set = node->structureSet();
- if (value.m_structure.isSubsetOf(set)) {
+ if (value.m_currentKnownStructure.isSubsetOf(set)) {
m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell.
- node->remove();
+ node->convertToPhantom();
eliminated = true;
break;
}
- break;
- }
-
- case GetIndexedPropertyStorage: {
- JSArrayBufferView* view = m_graph.tryGetFoldableView(
- m_state.forNode(node->child1()).m_value, node->arrayMode());
- if (!view)
- break;
-
- if (view->mode() == FastTypedArray) {
- // FIXME: It would be awesome to be able to fold the property storage for
- // these GC-allocated typed arrays. For now it doesn't matter because the
- // most common use-cases for constant typed arrays involve large arrays with
- // aliased buffer views.
- // https://bugs.webkit.org/show_bug.cgi?id=125425
+ StructureAbstractValue& structureValue = value.m_futurePossibleStructure;
+ if (structureValue.isSubsetOf(set)
+ && structureValue.hasSingleton()) {
+ Structure* structure = structureValue.singleton();
+ m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell.
+ AdjacencyList children = node->children;
+ children.removeEdge(0);
+ if (!!children.child1())
+ m_insertionSet.insertNode(indexInBlock, SpecNone, Phantom, node->codeOrigin, children);
+ node->children.setChild2(Edge());
+ node->children.setChild3(Edge());
+ node->convertToStructureTransitionWatchpoint(structure);
+ eliminated = true;
break;
}
-
- m_interpreter.execute(indexInBlock);
- eliminated = true;
-
- m_insertionSet.insertCheck(indexInBlock, node->origin, node->children);
- node->convertToConstantStoragePointer(view->vector());
- break;
- }
-
- case CheckStructureImmediate: {
- AbstractValue& value = m_state.forNode(node->child1());
- StructureSet& set = node->structureSet();
-
- if (value.value()) {
- if (Structure* structure = jsDynamicCast<Structure*>(value.value())) {
- if (set.contains(structure)) {
- m_interpreter.execute(indexInBlock);
- node->remove();
- eliminated = true;
- break;
- }
- }
- }
-
- if (PhiChildren* phiChildren = m_interpreter.phiChildren()) {
- bool allGood = true;
- phiChildren->forAllTransitiveIncomingValues(
- node,
- [&] (Node* incoming) {
- if (Structure* structure = incoming->dynamicCastConstant<Structure*>()) {
- if (set.contains(structure))
- return;
- }
- allGood = false;
- });
- if (allGood) {
- m_interpreter.execute(indexInBlock);
- node->remove();
- eliminated = true;
- break;
- }
- }
break;
}
@@ -178,369 +125,235 @@ private:
case Arrayify: {
if (!node->arrayMode().alreadyChecked(m_graph, node, m_state.forNode(node->child1())))
break;
- node->remove();
+ node->convertToPhantom();
eliminated = true;
break;
}
- case PutStructure: {
- if (m_state.forNode(node->child1()).m_structure.onlyStructure() != node->transition()->next)
+ case CheckFunction: {
+ if (m_state.forNode(node->child1()).value() != node->function())
break;
-
- node->remove();
+ node->convertToPhantom();
eliminated = true;
break;
}
- case CheckCell: {
- if (m_state.forNode(node->child1()).value() != node->cellOperand()->value())
- break;
- node->remove();
- eliminated = true;
- break;
- }
-
- case CheckNotEmpty: {
- if (m_state.forNode(node->child1()).m_type & SpecEmpty)
- break;
- node->remove();
- eliminated = true;
- break;
- }
-
- case CheckIdent: {
- UniquedStringImpl* uid = node->uidOperand();
- const UniquedStringImpl* constantUid = nullptr;
-
- JSValue childConstant = m_state.forNode(node->child1()).value();
- if (childConstant) {
- if (uid->isSymbol()) {
- if (childConstant.isSymbol())
- constantUid = asSymbol(childConstant)->privateName().uid();
- } else {
- if (childConstant.isString()) {
- if (const auto* impl = asString(childConstant)->tryGetValueImpl()) {
- // Edge filtering requires that a value here should be StringIdent.
- // However, a constant value propagated in DFG is not filtered.
- // So here, we check the propagated value is actually an atomic string.
- // And if it's not, we just ignore.
- if (impl->isAtomic())
- constantUid = static_cast<const UniquedStringImpl*>(impl);
- }
- }
- }
- }
-
- if (constantUid == uid) {
- node->remove();
- eliminated = true;
- }
- break;
- }
-
case CheckInBounds: {
JSValue left = m_state.forNode(node->child1()).value();
JSValue right = m_state.forNode(node->child2()).value();
if (left && right && left.isInt32() && right.isInt32()
&& static_cast<uint32_t>(left.asInt32()) < static_cast<uint32_t>(right.asInt32())) {
- node->remove();
+ node->convertToPhantom();
eliminated = true;
break;
}
break;
}
+
+ case GetById:
+ case GetByIdFlush: {
+ CodeOrigin codeOrigin = node->codeOrigin;
+ Edge childEdge = node->child1();
+ Node* child = childEdge.node();
+ unsigned identifierNumber = node->identifierNumber();
- case GetMyArgumentByVal: {
- JSValue index = m_state.forNode(node->child2()).value();
- if (!index || !index.isInt32())
+ if (childEdge.useKind() != CellUse)
break;
- Node* arguments = node->child1().node();
- InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;
-
- // Don't try to do anything if the index is known to be outside our static bounds. Note
- // that our static bounds are usually strictly larger than the dynamic bounds. The
- // exception is something like this, assuming foo() is not inlined:
- //
- // function foo() { return arguments[5]; }
- //
- // Here the static bound on number of arguments is 0, and we're accessing index 5. We
- // will not strength-reduce this to GetStack because GetStack is otherwise assumed by the
- // compiler to access those variables that are statically accounted for; for example if
- // we emitted a GetStack on arg6 we would have out-of-bounds access crashes anywhere that
- // uses an Operands<> map. There is not much cost to continuing to use a
- // GetMyArgumentByVal in such statically-out-of-bounds accesses; we just lose CFA unless
- // GCSE removes the access entirely.
- if (inlineCallFrame) {
- if (index.asUInt32() >= inlineCallFrame->arguments.size() - 1)
- break;
- } else {
- if (index.asUInt32() >= m_state.variables().numberOfArguments() - 1)
- break;
- }
-
- m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
-
- StackAccessData* data;
- if (inlineCallFrame) {
- data = m_graph.m_stackAccessData.add(
- VirtualRegister(
- inlineCallFrame->stackOffset +
- CallFrame::argumentOffset(index.asInt32())),
- FlushedJSValue);
- } else {
- data = m_graph.m_stackAccessData.add(
- virtualRegisterForArgument(index.asInt32() + 1), FlushedJSValue);
- }
-
- if (inlineCallFrame && !inlineCallFrame->isVarargs()
- && index.asUInt32() < inlineCallFrame->arguments.size() - 1) {
- node->convertToGetStack(data);
- eliminated = true;
+ Structure* structure = m_state.forNode(child).bestProvenStructure();
+ if (!structure)
break;
- }
-
- Node* length = emitCodeToGetArgumentsArrayLength(
- m_insertionSet, arguments, indexInBlock, node->origin);
- m_insertionSet.insertNode(
- indexInBlock, SpecNone, CheckInBounds, node->origin,
- node->child2(), Edge(length, Int32Use));
- node->convertToGetStack(data);
- eliminated = true;
- break;
- }
-
- case MultiGetByOffset: {
- Edge baseEdge = node->child1();
- Node* base = baseEdge.node();
- MultiGetByOffsetData& data = node->multiGetByOffsetData();
-
- // First prune the variants, then check if the MultiGetByOffset can be
- // strength-reduced to a GetByOffset.
-
- AbstractValue baseValue = m_state.forNode(base);
- m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
- alreadyHandled = true; // Don't allow the default constant folder to do things to this.
+ bool needsWatchpoint = !m_state.forNode(child).m_currentKnownStructure.hasSingleton();
+ bool needsCellCheck = m_state.forNode(child).m_type & ~SpecCell;
- for (unsigned i = 0; i < data.cases.size(); ++i) {
- MultiGetByOffsetCase& getCase = data.cases[i];
- getCase.set().filter(baseValue);
- if (getCase.set().isEmpty()) {
- data.cases[i--] = data.cases.last();
- data.cases.removeLast();
- changed = true;
- }
- }
+ GetByIdStatus status = GetByIdStatus::computeFor(
+ vm(), structure, m_graph.identifiers()[identifierNumber]);
- if (data.cases.size() != 1)
+ if (!status.isSimple()) {
+ // FIXME: We could handle prototype cases.
+ // https://bugs.webkit.org/show_bug.cgi?id=110386
break;
+ }
- emitGetByOffset(indexInBlock, node, baseValue, data.cases[0], data.identifierNumber);
- changed = true;
- break;
- }
-
- case MultiPutByOffset: {
- Edge baseEdge = node->child1();
- Node* base = baseEdge.node();
- MultiPutByOffsetData& data = node->multiPutByOffsetData();
+ ASSERT(status.structureSet().size() == 1);
+ ASSERT(!status.chain());
+ ASSERT(status.structureSet().singletonStructure() == structure);
- AbstractValue baseValue = m_state.forNode(base);
-
- m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
- alreadyHandled = true; // Don't allow the default constant folder to do things to this.
+ // Now before we do anything else, push the CFA forward over the GetById
+ // and make sure we signal to the loop that it should continue and not
+ // do any eliminations.
+ m_interpreter.execute(indexInBlock);
+ eliminated = true;
-
- for (unsigned i = 0; i < data.variants.size(); ++i) {
- PutByIdVariant& variant = data.variants[i];
- variant.oldStructure().filter(baseValue);
-
- if (variant.oldStructure().isEmpty()) {
- data.variants[i--] = data.variants.last();
- data.variants.removeLast();
- changed = true;
- continue;
- }
-
- if (variant.kind() == PutByIdVariant::Transition
- && variant.oldStructure().onlyStructure() == variant.newStructure()) {
- variant = PutByIdVariant::replace(
- variant.oldStructure(),
- variant.offset());
- changed = true;
- }
+ if (needsWatchpoint) {
+ m_insertionSet.insertNode(
+ indexInBlock, SpecNone, StructureTransitionWatchpoint, codeOrigin,
+ OpInfo(structure), childEdge);
+ } else if (needsCellCheck) {
+ m_insertionSet.insertNode(
+ indexInBlock, SpecNone, Phantom, codeOrigin, childEdge);
}
-
- if (data.variants.size() != 1)
- break;
- emitPutByOffset(
- indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
- changed = true;
- break;
- }
-
- case GetById:
- case GetByIdFlush: {
- Edge childEdge = node->child1();
- Node* child = childEdge.node();
- unsigned identifierNumber = node->identifierNumber();
+ childEdge.setUseKind(KnownCellUse);
- AbstractValue baseValue = m_state.forNode(child);
-
- m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
- alreadyHandled = true; // Don't allow the default constant folder to do things to this.
-
- if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered()
- || (node->child1().useKind() == UntypedUse || (baseValue.m_type & ~SpecCell)))
- break;
+ Edge propertyStorage;
- GetByIdStatus status = GetByIdStatus::computeFor(
- baseValue.m_structure.set(), m_graph.identifiers()[identifierNumber]);
- if (!status.isSimple())
- break;
-
- for (unsigned i = status.numVariants(); i--;) {
- if (!status[i].conditionSet().isEmpty()) {
- // FIXME: We could handle prototype cases.
- // https://bugs.webkit.org/show_bug.cgi?id=110386
- break;
- }
- }
-
- if (status.numVariants() == 1) {
- emitGetByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
- changed = true;
- break;
+ if (isInlineOffset(status.offset()))
+ propertyStorage = childEdge;
+ else {
+ propertyStorage = Edge(m_insertionSet.insertNode(
+ indexInBlock, SpecNone, GetButterfly, codeOrigin, childEdge));
}
- if (!isFTL(m_graph.m_plan.mode))
- break;
+ node->convertToGetByOffset(m_graph.m_storageAccessData.size(), propertyStorage);
- MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
- for (const GetByIdVariant& variant : status.variants()) {
- data->cases.append(
- MultiGetByOffsetCase(
- variant.structureSet(),
- GetByOffsetMethod::load(variant.offset())));
- }
- data->identifierNumber = identifierNumber;
- node->convertToMultiGetByOffset(data);
- changed = true;
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = status.offset();
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
break;
}
case PutById:
- case PutByIdDirect:
- case PutByIdFlush: {
- NodeOrigin origin = node->origin;
+ case PutByIdDirect: {
+ CodeOrigin codeOrigin = node->codeOrigin;
Edge childEdge = node->child1();
Node* child = childEdge.node();
unsigned identifierNumber = node->identifierNumber();
ASSERT(childEdge.useKind() == CellUse);
- AbstractValue baseValue = m_state.forNode(child);
-
- m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
- alreadyHandled = true; // Don't allow the default constant folder to do things to this.
-
- if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered())
+ Structure* structure = m_state.forNode(child).bestProvenStructure();
+ if (!structure)
break;
+ bool needsWatchpoint = !m_state.forNode(child).m_currentKnownStructure.hasSingleton();
+ bool needsCellCheck = m_state.forNode(child).m_type & ~SpecCell;
+
PutByIdStatus status = PutByIdStatus::computeFor(
- m_graph.globalObjectFor(origin.semantic),
- baseValue.m_structure.set(),
+ vm(),
+ m_graph.globalObjectFor(codeOrigin),
+ structure,
m_graph.identifiers()[identifierNumber],
node->op() == PutByIdDirect);
- if (!status.isSimple())
+ if (!status.isSimpleReplace() && !status.isSimpleTransition())
break;
- ASSERT(status.numVariants());
+ ASSERT(status.oldStructure() == structure);
- if (status.numVariants() > 1 && !isFTL(m_graph.m_plan.mode))
- break;
+ // Now before we do anything else, push the CFA forward over the PutById
+ // and make sure we signal to the loop that it should continue and not
+ // do any eliminations.
+ m_interpreter.execute(indexInBlock);
+ eliminated = true;
- changed = true;
-
- bool allGood = true;
- for (const PutByIdVariant& variant : status.variants()) {
- if (!allGood)
- break;
- for (const ObjectPropertyCondition& condition : variant.conditionSet()) {
- if (m_graph.watchCondition(condition))
- continue;
-
- Structure* structure = condition.object()->structure();
- if (!condition.structureEnsuresValidity(structure)) {
- allGood = false;
- break;
+ if (needsWatchpoint) {
+ m_insertionSet.insertNode(
+ indexInBlock, SpecNone, StructureTransitionWatchpoint, codeOrigin,
+ OpInfo(structure), childEdge);
+ } else if (needsCellCheck) {
+ m_insertionSet.insertNode(
+ indexInBlock, SpecNone, Phantom, codeOrigin, childEdge);
+ }
+
+ childEdge.setUseKind(KnownCellUse);
+
+ StructureTransitionData* transitionData = 0;
+ if (status.isSimpleTransition()) {
+ transitionData = m_graph.addStructureTransitionData(
+ StructureTransitionData(structure, status.newStructure()));
+
+ if (node->op() == PutById) {
+ if (!structure->storedPrototype().isNull()) {
+ addStructureTransitionCheck(
+ codeOrigin, indexInBlock,
+ structure->storedPrototype().asCell());
+ }
+
+ m_graph.chains().addLazily(status.structureChain());
+
+ for (unsigned i = 0; i < status.structureChain()->size(); ++i) {
+ JSValue prototype = status.structureChain()->at(i)->storedPrototype();
+ if (prototype.isNull())
+ continue;
+ ASSERT(prototype.isCell());
+ addStructureTransitionCheck(
+ codeOrigin, indexInBlock, prototype.asCell());
}
-
- m_insertionSet.insertNode(
- indexInBlock, SpecNone, CheckStructure, node->origin,
- OpInfo(m_graph.addStructureSet(structure)),
- m_insertionSet.insertConstantForUse(
- indexInBlock, node->origin, condition.object(), KnownCellUse));
}
}
-
- if (!allGood)
- break;
- if (status.numVariants() == 1) {
- emitPutByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
- break;
+ Edge propertyStorage;
+
+ if (isInlineOffset(status.offset()))
+ propertyStorage = childEdge;
+ else if (status.isSimpleReplace() || structure->outOfLineCapacity() == status.newStructure()->outOfLineCapacity()) {
+ propertyStorage = Edge(m_insertionSet.insertNode(
+ indexInBlock, SpecNone, GetButterfly, codeOrigin, childEdge));
+ } else if (!structure->outOfLineCapacity()) {
+ ASSERT(status.newStructure()->outOfLineCapacity());
+ ASSERT(!isInlineOffset(status.offset()));
+ Node* allocatePropertyStorage = m_insertionSet.insertNode(
+ indexInBlock, SpecNone, AllocatePropertyStorage,
+ codeOrigin, OpInfo(transitionData), childEdge);
+ m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, codeOrigin, Edge(node->child1().node(), KnownCellUse));
+ propertyStorage = Edge(allocatePropertyStorage);
+ } else {
+ ASSERT(structure->outOfLineCapacity());
+ ASSERT(status.newStructure()->outOfLineCapacity() > structure->outOfLineCapacity());
+ ASSERT(!isInlineOffset(status.offset()));
+
+ Node* reallocatePropertyStorage = m_insertionSet.insertNode(
+ indexInBlock, SpecNone, ReallocatePropertyStorage, codeOrigin,
+ OpInfo(transitionData), childEdge,
+ Edge(m_insertionSet.insertNode(
+ indexInBlock, SpecNone, GetButterfly, codeOrigin, childEdge)));
+ m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, codeOrigin, Edge(node->child1().node(), KnownCellUse));
+ propertyStorage = Edge(reallocatePropertyStorage);
}
- ASSERT(isFTL(m_graph.m_plan.mode));
+ if (status.isSimpleTransition()) {
+ Node* putStructure = m_graph.addNode(SpecNone, PutStructure, codeOrigin, OpInfo(transitionData), childEdge);
+ m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, codeOrigin, Edge(node->child1().node(), KnownCellUse));
+ m_insertionSet.insert(indexInBlock, putStructure);
+ }
- MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
- data->variants = status.variants();
- data->identifierNumber = identifierNumber;
- node->convertToMultiPutByOffset(data);
+ node->convertToPutByOffset(m_graph.m_storageAccessData.size(), propertyStorage);
+ m_insertionSet.insertNode(indexInBlock, SpecNone, ConditionalStoreBarrier, codeOrigin,
+ Edge(node->child2().node(), KnownCellUse), Edge(node->child3().node(), UntypedUse));
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = status.offset();
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
break;
}
- case ToPrimitive: {
- if (m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString | SpecSymbol))
- break;
-
- node->convertToIdentity();
- changed = true;
+ case ConditionalStoreBarrier: {
+ if (!m_interpreter.needsTypeCheck(node->child2().node(), ~SpecCell)) {
+ node->convertToPhantom();
+ eliminated = true;
+ }
break;
}
-
- case Check: {
- alreadyHandled = true;
- m_interpreter.execute(indexInBlock);
- for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
- Edge edge = node->children.child(i);
- if (!edge)
- break;
- if (edge.isProved() || edge.willNotHaveCheck()) {
- node->children.removeEdge(i--);
- changed = true;
- }
- }
+
+ case StoreBarrier:
+ case StoreBarrierWithNullCheck: {
break;
}
-
+
default:
break;
}
-
+
if (eliminated) {
changed = true;
continue;
}
- if (alreadyHandled)
- continue;
-
m_interpreter.execute(indexInBlock);
if (!m_state.isValid()) {
// If we invalidated then we shouldn't attempt to constant-fold. Here's an
@@ -559,23 +372,31 @@ private:
}
if (!node->shouldGenerate() || m_state.didClobber() || node->hasConstant())
continue;
+ JSValue value = m_state.forNode(node).value();
+ if (!value)
+ continue;
- // Interesting fact: this freezing that we do right here may turn an fragile value into
- // a weak value. See DFGValueStrength.h.
- FrozenValue* value = m_graph.freeze(m_state.forNode(node).value());
- if (!*value)
+ // Check if merging the abstract value of the constant into the abstract value
+ // we've proven for this node wouldn't widen the proof. If it widens the proof
+ // (i.e. says that the set contains more things in it than it previously did)
+ // then we refuse to fold.
+ AbstractValue oldValue = m_state.forNode(node);
+ AbstractValue constantValue;
+ constantValue.set(m_graph, value);
+ if (oldValue.merge(constantValue))
continue;
+
+ CodeOrigin codeOrigin = node->codeOrigin;
+ AdjacencyList children = node->children;
- if (node->op() == GetLocal) {
- // Need to preserve bytecode liveness in ThreadedCPS form. This wouldn't be necessary
- // if it wasn't for https://bugs.webkit.org/show_bug.cgi?id=144086.
- m_insertionSet.insertNode(
- indexInBlock, SpecNone, PhantomLocal, node->origin,
- OpInfo(node->variableAccessData()));
+ if (node->op() == GetLocal)
m_graph.dethread();
- } else
- m_insertionSet.insertCheck(indexInBlock, node->origin, node->children);
+ else
+ ASSERT(!node->hasVariableAccessData(m_graph));
+
m_graph.convertToConstant(node, value);
+ m_insertionSet.insertNode(
+ indexInBlock, SpecNone, Phantom, codeOrigin, children);
changed = true;
}
@@ -584,186 +405,22 @@ private:
return changed;
}
-
- void emitGetByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const MultiGetByOffsetCase& getCase, unsigned identifierNumber)
- {
- // When we get to here we have already emitted all of the requisite checks for everything.
- // So, we just need to emit what the method object tells us to emit.
-
- addBaseCheck(indexInBlock, node, baseValue, getCase.set());
-
- GetByOffsetMethod method = getCase.method();
-
- switch (method.kind()) {
- case GetByOffsetMethod::Invalid:
- RELEASE_ASSERT_NOT_REACHED();
- return;
-
- case GetByOffsetMethod::Constant:
- m_graph.convertToConstant(node, method.constant());
- return;
-
- case GetByOffsetMethod::Load:
- emitGetByOffset(indexInBlock, node, node->child1(), identifierNumber, method.offset());
- return;
-
- case GetByOffsetMethod::LoadFromPrototype: {
- Node* child = m_insertionSet.insertConstant(
- indexInBlock, node->origin, method.prototype());
- emitGetByOffset(
- indexInBlock, node, Edge(child, KnownCellUse), identifierNumber, method.offset());
- return;
- } }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- void emitGetByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const GetByIdVariant& variant, unsigned identifierNumber)
- {
- Edge childEdge = node->child1();
-
- addBaseCheck(indexInBlock, node, baseValue, variant.structureSet());
-
- // We aren't set up to handle prototype stuff.
- DFG_ASSERT(m_graph, node, variant.conditionSet().isEmpty());
-
- if (JSValue value = m_graph.tryGetConstantProperty(baseValue.m_value, variant.structureSet(), variant.offset())) {
- m_graph.convertToConstant(node, m_graph.freeze(value));
- return;
- }
-
- emitGetByOffset(indexInBlock, node, childEdge, identifierNumber, variant.offset());
- }
-
- void emitGetByOffset(unsigned indexInBlock, Node* node, Edge childEdge, unsigned identifierNumber, PropertyOffset offset)
- {
- childEdge.setUseKind(KnownCellUse);
-
- Edge propertyStorage;
-
- if (isInlineOffset(offset))
- propertyStorage = childEdge;
- else {
- propertyStorage = Edge(m_insertionSet.insertNode(
- indexInBlock, SpecNone, GetButterfly, node->origin, childEdge));
- }
-
- StorageAccessData& data = *m_graph.m_storageAccessData.add();
- data.offset = offset;
- data.identifierNumber = identifierNumber;
-
- node->convertToGetByOffset(data, propertyStorage);
- }
- void emitPutByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const PutByIdVariant& variant, unsigned identifierNumber)
+ void addStructureTransitionCheck(CodeOrigin codeOrigin, unsigned indexInBlock, JSCell* cell)
{
- NodeOrigin origin = node->origin;
- Edge childEdge = node->child1();
-
- addBaseCheck(indexInBlock, node, baseValue, variant.oldStructure());
-
- childEdge.setUseKind(KnownCellUse);
-
- Transition* transition = 0;
- if (variant.kind() == PutByIdVariant::Transition) {
- transition = m_graph.m_transitions.add(
- variant.oldStructureForTransition(), variant.newStructure());
- }
-
- Edge propertyStorage;
-
- if (isInlineOffset(variant.offset()))
- propertyStorage = childEdge;
- else if (!variant.reallocatesStorage()) {
- propertyStorage = Edge(m_insertionSet.insertNode(
- indexInBlock, SpecNone, GetButterfly, origin, childEdge));
- } else if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
- ASSERT(variant.newStructure()->outOfLineCapacity());
- ASSERT(!isInlineOffset(variant.offset()));
- Node* allocatePropertyStorage = m_insertionSet.insertNode(
- indexInBlock, SpecNone, AllocatePropertyStorage,
- origin, OpInfo(transition), childEdge);
- propertyStorage = Edge(allocatePropertyStorage);
- } else {
- ASSERT(variant.oldStructureForTransition()->outOfLineCapacity());
- ASSERT(variant.newStructure()->outOfLineCapacity() > variant.oldStructureForTransition()->outOfLineCapacity());
- ASSERT(!isInlineOffset(variant.offset()));
-
- Node* reallocatePropertyStorage = m_insertionSet.insertNode(
- indexInBlock, SpecNone, ReallocatePropertyStorage, origin,
- OpInfo(transition), childEdge,
- Edge(m_insertionSet.insertNode(
- indexInBlock, SpecNone, GetButterfly, origin, childEdge)));
- propertyStorage = Edge(reallocatePropertyStorage);
- }
-
- StorageAccessData& data = *m_graph.m_storageAccessData.add();
- data.offset = variant.offset();
- data.identifierNumber = identifierNumber;
+ Node* weakConstant = m_insertionSet.insertNode(
+ indexInBlock, speculationFromValue(cell), WeakJSConstant, codeOrigin, OpInfo(cell));
- node->convertToPutByOffset(data, propertyStorage);
-
- if (variant.kind() == PutByIdVariant::Transition) {
- // FIXME: PutStructure goes last until we fix either
- // https://bugs.webkit.org/show_bug.cgi?id=142921 or
- // https://bugs.webkit.org/show_bug.cgi?id=142924.
- m_insertionSet.insertNode(
- indexInBlock + 1, SpecNone, PutStructure, origin, OpInfo(transition), childEdge);
- }
- }
-
- void addBaseCheck(
- unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const StructureSet& set)
- {
- if (!baseValue.m_structure.isSubsetOf(set)) {
- // Arises when we prune MultiGetByOffset. We could have a
- // MultiGetByOffset with a single variant that checks for structure S,
- // and the input has structures S and T, for example.
+ if (m_graph.watchpoints().isStillValid(cell->structure()->transitionWatchpointSet())) {
m_insertionSet.insertNode(
- indexInBlock, SpecNone, CheckStructure, node->origin,
- OpInfo(m_graph.addStructureSet(set)), node->child1());
+ indexInBlock, SpecNone, StructureTransitionWatchpoint, codeOrigin,
+ OpInfo(cell->structure()), Edge(weakConstant, CellUse));
return;
}
-
- if (baseValue.m_type & ~SpecCell)
- m_insertionSet.insertCheck(indexInBlock, node->origin, node->child1());
- }
-
- void addStructureTransitionCheck(NodeOrigin origin, unsigned indexInBlock, JSCell* cell, Structure* structure)
- {
- if (m_graph.registerStructure(cell->structure()) == StructureRegisteredAndWatched)
- return;
-
- m_graph.registerStructure(structure);
- Node* weakConstant = m_insertionSet.insertNode(
- indexInBlock, speculationFromValue(cell), JSConstant, origin,
- OpInfo(m_graph.freeze(cell)));
-
m_insertionSet.insertNode(
- indexInBlock, SpecNone, CheckStructure, origin,
- OpInfo(m_graph.addStructureSet(structure)), Edge(weakConstant, CellUse));
- }
-
- void fixUpsilons(BasicBlock* block)
- {
- for (unsigned nodeIndex = block->size(); nodeIndex--;) {
- Node* node = block->at(nodeIndex);
- if (node->op() != Upsilon)
- continue;
- switch (node->phi()->op()) {
- case Phi:
- break;
- case JSConstant:
- case DoubleConstant:
- case Int52Constant:
- node->remove();
- break;
- default:
- DFG_CRASH(m_graph, node, "Bad Upsilon phi() pointer");
- break;
- }
- }
+ indexInBlock, SpecNone, CheckStructure, codeOrigin,
+ OpInfo(m_graph.addStructureSet(cell->structure())), Edge(weakConstant, CellUse));
}
InPlaceAbstractState m_state;
diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h
index d2f7e1351..cde16806c 100644
--- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGConstantFoldingPhase_h
#define DFGConstantFoldingPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.cpp
deleted file mode 100644
index 68f3651c5..000000000
--- a/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.cpp
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGConstantHoistingPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGPhase.h"
-#include "DFGPredictionPropagationPhase.h"
-#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-class ConstantHoistingPhase : public Phase {
-public:
- ConstantHoistingPhase(Graph& graph)
- : Phase(graph, "constant hoisting")
- {
- }
-
- bool run()
- {
- DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
-
- m_graph.clearReplacements();
-
- HashMap<FrozenValue*, Node*> jsValues;
- HashMap<FrozenValue*, Node*> doubleValues;
- HashMap<FrozenValue*, Node*> int52Values;
-
- auto valuesFor = [&] (NodeType op) -> HashMap<FrozenValue*, Node*>& {
- // Use a roundabout approach because clang thinks that this closure returning a
- // reference to a stack-allocated value in outer scope is a bug. It's not.
- HashMap<FrozenValue*, Node*>* result;
-
- switch (op) {
- case JSConstant:
- result = &jsValues;
- break;
- case DoubleConstant:
- result = &doubleValues;
- break;
- case Int52Constant:
- result = &int52Values;
- break;
- default:
- DFG_CRASH(m_graph, nullptr, "Invalid node type in valuesFor()");
- result = nullptr;
- break;
- }
-
- return *result;
- };
-
- Vector<Node*> toFree;
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- unsigned sourceIndex = 0;
- unsigned targetIndex = 0;
- while (sourceIndex < block->size()) {
- Node* node = block->at(sourceIndex++);
- switch (node->op()) {
- case JSConstant:
- case DoubleConstant:
- case Int52Constant: {
- HashMap<FrozenValue*, Node*>& values = valuesFor(node->op());
- auto result = values.add(node->constant(), node);
- if (result.isNewEntry)
- node->origin = NodeOrigin();
- else {
- node->setReplacement(result.iterator->value);
- toFree.append(node);
- }
- break;
- }
- default:
- block->at(targetIndex++) = node;
- break;
- }
- }
- block->resize(targetIndex);
- }
-
- // Insert the constants into the root block.
- InsertionSet insertionSet(m_graph);
- auto insertConstants = [&] (const HashMap<FrozenValue*, Node*>& values) {
- for (auto& entry : values)
- insertionSet.insert(0, entry.value);
- };
- insertConstants(jsValues);
- insertConstants(doubleValues);
- insertConstants(int52Values);
- insertionSet.execute(m_graph.block(0));
-
- // Perform all of the substitutions. We want all instances of the removed constants to
- // point at their replacements.
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- for (Node* node : *block)
- m_graph.performSubstitution(node);
- }
-
- // And finally free the constants that we removed.
- for (Node* node : toFree)
- m_graph.m_allocator.free(node);
-
- return true;
- }
-};
-
-} // anonymous namespace
-
-bool performConstantHoisting(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Constant Hoisting Phase");
- return runPhase<ConstantHoistingPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.h b/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.h
deleted file mode 100644
index 5124f168e..000000000
--- a/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGConstantHoistingPhase_h
-#define DFGConstantHoistingPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Hoists all constants to the top of the root block.
-
-bool performConstantHoisting(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGConstantHoistingPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp b/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp
index 18f6f5ebc..617bffd90 100644
--- a/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp
@@ -32,7 +32,7 @@
#include "DFGBlockInsertionSet.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include <wtf/HashMap.h>
namespace JSC { namespace DFG {
@@ -73,11 +73,9 @@ public:
private:
void breakCriticalEdge(BasicBlock* predecessor, BasicBlock** successor)
{
- // Note that we pass NaN for the count of the critical edge block, because we honestly
- // don't know its execution frequency.
- BasicBlock* pad = m_insertionSet.insertBefore(*successor, PNaN);
+ BasicBlock* pad = m_insertionSet.insertBefore(*successor);
pad->appendNode(
- m_graph, SpecNone, Jump, (*successor)->firstOrigin(), OpInfo(*successor));
+ m_graph, SpecNone, Jump, (*successor)->at(0)->codeOrigin, OpInfo(*successor));
pad->predecessors.append(predecessor);
(*successor)->replacePredecessor(predecessor, pad);
diff --git a/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h b/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h
index bc94f8256..d801c1250 100644
--- a/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGCriticalEdgeBreakingPhase_h
#define DFGCriticalEdgeBreakingPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp b/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp
index 5290f2422..36f7683a8 100644
--- a/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +32,7 @@
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -48,34 +48,77 @@ public:
{
ASSERT(m_graph.m_form == ThreadedCPS || m_graph.m_form == SSA);
- m_graph.computeRefCounts();
-
- for (BasicBlock* block : m_graph.blocksInPreOrder())
- fixupBlock(block);
-
- cleanVariables(m_graph.m_arguments);
-
- // Just do a basic Phantom/Check clean-up.
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ // First reset the counts to 0 for all nodes.
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = block->size(); indexInBlock--;)
+ block->at(indexInBlock)->setRefCount(0);
+ for (unsigned phiIndex = block->phis.size(); phiIndex--;)
+ block->phis[phiIndex]->setRefCount(0);
+ }
+
+ // Now find the roots:
+ // - Nodes that are must-generate.
+ // - Nodes that are reachable from type checks.
+ // Set their ref counts to 1 and put them on the worklist.
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
- unsigned sourceIndex = 0;
- unsigned targetIndex = 0;
- while (sourceIndex < block->size()) {
- Node* node = block->at(sourceIndex++);
- switch (node->op()) {
- case Check:
- case Phantom:
- if (node->children.isEmpty())
+ for (unsigned indexInBlock = block->size(); indexInBlock--;) {
+ Node* node = block->at(indexInBlock);
+ DFG_NODE_DO_TO_CHILDREN(m_graph, node, findTypeCheckRoot);
+ if (!(node->flags() & NodeMustGenerate))
+ continue;
+ if (!node->postfixRef())
+ m_worklist.append(node);
+ }
+ }
+
+ while (!m_worklist.isEmpty()) {
+ while (!m_worklist.isEmpty()) {
+ Node* node = m_worklist.last();
+ m_worklist.removeLast();
+ ASSERT(node->shouldGenerate()); // It should not be on the worklist unless it's ref'ed.
+ DFG_NODE_DO_TO_CHILDREN(m_graph, node, countEdge);
+ }
+
+ if (m_graph.m_form == SSA) {
+ // Find Phi->Upsilon edges, which are represented as meta-data in the
+ // Upsilon.
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
continue;
- break;
- default:
- break;
+ for (unsigned nodeIndex = block->size(); nodeIndex--;) {
+ Node* node = block->at(nodeIndex);
+ if (node->op() != Upsilon)
+ continue;
+ if (node->shouldGenerate())
+ continue;
+ if (node->phi()->shouldGenerate())
+ countNode(node);
+ }
}
- block->at(targetIndex++) = node;
}
- block->resize(targetIndex);
+ }
+
+ if (m_graph.m_form == SSA) {
+ // Need to process the graph in reverse DFS order, so that we get to the uses
+ // of a node before we get to the node itself.
+ Vector<BasicBlock*> depthFirst;
+ m_graph.getBlocksInDepthFirstOrder(depthFirst);
+ for (unsigned i = depthFirst.size(); i--;)
+ fixupBlock(depthFirst[i]);
+ } else {
+ RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
+ fixupBlock(m_graph.block(blockIndex));
+
+ cleanVariables(m_graph.m_arguments);
}
m_graph.m_refCountState = ExactRefCount;
@@ -84,16 +127,51 @@ public:
}
private:
+ void findTypeCheckRoot(Node*, Edge edge)
+ {
+ // We may have an "unproved" untyped use for code that is unreachable. The CFA
+ // will just not have gotten around to it.
+ if (edge.willNotHaveCheck())
+ return;
+ if (!edge->postfixRef())
+ m_worklist.append(edge.node());
+ }
+
+ void countNode(Node* node)
+ {
+ if (node->postfixRef())
+ return;
+ m_worklist.append(node);
+ }
+
+ void countEdge(Node*, Edge edge)
+ {
+ // Don't count edges that are already counted for their type checks.
+ if (edge.willHaveCheck())
+ return;
+ countNode(edge.node());
+ }
+
void fixupBlock(BasicBlock* block)
{
if (!block)
return;
-
- if (m_graph.m_form == ThreadedCPS) {
+
+ switch (m_graph.m_form) {
+ case SSA:
+ break;
+
+ case ThreadedCPS: {
+ // Clean up variable links for the block. We need to do this before the actual DCE
+ // because we need to see GetLocals, so we can bypass them in situations where the
+ // vars-at-tail point to a GetLocal, the GetLocal is dead, but the Phi it points
+ // to is alive.
+
for (unsigned phiIndex = 0; phiIndex < block->phis.size(); ++phiIndex) {
- Node* phi = block->phis[phiIndex];
- if (!phi->shouldGenerate()) {
- m_graph.m_allocator.free(phi);
+ if (!block->phis[phiIndex]->shouldGenerate()) {
+ // FIXME: We could actually free nodes here. Except that it probably
+ // doesn't matter, since we don't add any nodes after this phase.
+ // https://bugs.webkit.org/show_bug.cgi?id=126239
block->phis[phiIndex--] = block->phis.last();
block->phis.removeLast();
}
@@ -101,37 +179,75 @@ private:
cleanVariables(block->variablesAtHead);
cleanVariables(block->variablesAtTail);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
}
- // This has to be a forward loop because we are using the insertion set.
- for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ for (unsigned indexInBlock = block->size(); indexInBlock--;) {
Node* node = block->at(indexInBlock);
if (node->shouldGenerate())
continue;
- if (node->flags() & NodeHasVarArgs) {
- for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
- Edge edge = m_graph.m_varArgChildren[childIdx];
-
- if (!edge || edge.willNotHaveCheck())
- continue;
-
- m_insertionSet.insertNode(indexInBlock, SpecNone, Check, node->origin, edge);
+ switch (node->op()) {
+ case MovHint: {
+ ASSERT(node->child1().useKind() == UntypedUse);
+ if (!node->child1()->shouldGenerate()) {
+ node->setOpAndDefaultFlags(ZombieHint);
+ node->child1() = Edge();
+ break;
}
+ node->setOpAndDefaultFlags(MovHint);
+ break;
+ }
- node->setOpAndDefaultFlags(Check);
- node->children.reset();
- node->setRefCount(1);
- continue;
+ case ZombieHint: {
+ // Currently we assume that DCE runs only once.
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
- node->remove();
- node->setRefCount(1);
+ default: {
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
+ Edge edge = m_graph.m_varArgChildren[childIdx];
+
+ if (!edge || edge.willNotHaveCheck())
+ continue;
+
+ m_insertionSet.insertNode(indexInBlock, SpecNone, Phantom, node->codeOrigin, edge);
+ }
+
+ node->convertToPhantomUnchecked();
+ node->children.reset();
+ node->setRefCount(1);
+ break;
+ }
+
+ node->convertToPhantom();
+ eliminateIrrelevantPhantomChildren(node);
+ node->setRefCount(1);
+ break;
+ } }
}
m_insertionSet.execute(block);
}
+ void eliminateIrrelevantPhantomChildren(Node* node)
+ {
+ for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
+ Edge edge = node->children.child(i);
+ if (!edge)
+ continue;
+ if (edge.willNotHaveCheck())
+ node->children.removeEdge(i--);
+ }
+ }
+
template<typename VariablesVectorType>
void cleanVariables(VariablesVectorType& variables)
{
@@ -139,12 +255,21 @@ private:
Node* node = variables[i];
if (!node)
continue;
- if (node->op() != Check && node->shouldGenerate())
+ if (node->op() != Phantom && node->shouldGenerate())
continue;
- variables[i] = nullptr;
+ if (node->op() == GetLocal) {
+ node = node->child1().node();
+ ASSERT(node->op() == Phi || node->op() == SetArgument);
+ if (node->shouldGenerate()) {
+ variables[i] = node;
+ continue;
+ }
+ }
+ variables[i] = 0;
}
}
+ Vector<Node*, 128> m_worklist;
InsertionSet m_insertionSet;
};
diff --git a/Source/JavaScriptCore/dfg/DFGDCEPhase.h b/Source/JavaScriptCore/dfg/DFGDCEPhase.h
index b5e31ddd2..2bb991306 100644
--- a/Source/JavaScriptCore/dfg/DFGDCEPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGDCEPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGDCEPhase_h
#define DFGDCEPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp b/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp
index d3e8eac1c..f6587f47f 100644
--- a/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,19 +29,11 @@
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
-DesiredIdentifiers::DesiredIdentifiers()
- : m_codeBlock(nullptr)
- , m_didProcessIdentifiers(false)
-{
-}
-
DesiredIdentifiers::DesiredIdentifiers(CodeBlock* codeBlock)
: m_codeBlock(codeBlock)
- , m_didProcessIdentifiers(false)
{
}
@@ -54,28 +46,14 @@ unsigned DesiredIdentifiers::numberOfIdentifiers()
return m_codeBlock->numberOfIdentifiers() + m_addedIdentifiers.size();
}
-unsigned DesiredIdentifiers::ensure(UniquedStringImpl* rep)
+void DesiredIdentifiers::addLazily(StringImpl* rep)
{
- if (!m_didProcessIdentifiers) {
- // Do this now instead of the constructor so that we don't pay the price on the main
- // thread. Also, not all compilations need to call ensure().
- for (unsigned index = m_codeBlock->numberOfIdentifiers(); index--;)
- m_identifierNumberForName.add(m_codeBlock->identifier(index).impl(), index);
- m_didProcessIdentifiers = true;
- }
-
- auto addResult = m_identifierNumberForName.add(rep, numberOfIdentifiers());
- unsigned result = addResult.iterator->value;
- if (addResult.isNewEntry) {
- m_addedIdentifiers.append(rep);
- ASSERT(at(result) == rep);
- }
- return result;
+ m_addedIdentifiers.append(rep);
}
-UniquedStringImpl* DesiredIdentifiers::at(unsigned index) const
+StringImpl* DesiredIdentifiers::at(unsigned index) const
{
- UniquedStringImpl* result;
+ StringImpl* result;
if (index < m_codeBlock->numberOfIdentifiers())
result = m_codeBlock->identifier(index).impl();
else
@@ -87,9 +65,9 @@ UniquedStringImpl* DesiredIdentifiers::at(unsigned index) const
void DesiredIdentifiers::reallyAdd(VM& vm, CommonData* commonData)
{
for (unsigned i = 0; i < m_addedIdentifiers.size(); ++i) {
- auto rep = m_addedIdentifiers[i];
+ StringImpl* rep = m_addedIdentifiers[i];
ASSERT(rep->hasAtLeastOneRef());
- commonData->dfgIdentifiers.append(Identifier::fromUid(&vm, rep));
+ commonData->dfgIdentifiers.append(Identifier(&vm, rep));
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h b/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h
index d8587dc6d..a41f230b3 100644
--- a/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h
+++ b/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGDesiredIdentifiers_h
#define DFGDesiredIdentifiers_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "Identifier.h"
@@ -40,24 +42,21 @@ class CommonData;
class DesiredIdentifiers {
public:
- DesiredIdentifiers();
DesiredIdentifiers(CodeBlock*);
~DesiredIdentifiers();
unsigned numberOfIdentifiers();
- unsigned ensure(UniquedStringImpl*);
+ void addLazily(StringImpl*);
- UniquedStringImpl* at(unsigned index) const;
+ StringImpl* at(unsigned index) const;
- UniquedStringImpl* operator[](unsigned index) const { return at(index); }
+ StringImpl* operator[](unsigned index) const { return at(index); }
void reallyAdd(VM&, CommonData*);
private:
CodeBlock* m_codeBlock;
- Vector<UniquedStringImpl*> m_addedIdentifiers;
- HashMap<UniquedStringImpl*, unsigned> m_identifierNumberForName;
- bool m_didProcessIdentifiers;
+ Vector<StringImpl*> m_addedIdentifiers;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGTransition.cpp b/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.cpp
index 80d9b994b..04bd5f01f 100644
--- a/Source/JavaScriptCore/dfg/DFGTransition.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -24,22 +24,22 @@
*/
#include "config.h"
-#include "DFGTransition.h"
+#include "DFGDesiredStructureChains.h"
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace JSC { namespace DFG {
-void Transition::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- out.print(pointerDumpInContext(previous, context), " -> ", pointerDumpInContext(next, context));
-}
+DesiredStructureChains::DesiredStructureChains() { }
+DesiredStructureChains::~DesiredStructureChains() { }
-void Transition::dump(PrintStream& out) const
+bool DesiredStructureChains::areStillValid() const
{
- dumpInContext(out, 0);
+ for (unsigned i = 0; i < m_vector.size(); ++i) {
+ if (!m_vector[i]->isStillValid())
+ return false;
+ }
+ return true;
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGGraphSafepoint.h b/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.h
index 1759b6e12..3c20194d7 100644
--- a/Source/JavaScriptCore/dfg/DFGGraphSafepoint.h
+++ b/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -23,29 +23,36 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGGraphSafepoint_h
-#define DFGGraphSafepoint_h
+#ifndef DFGDesiredStructureChains_h
+#define DFGDesiredStructureChains_h
+
+#include <wtf/Platform.h>
#if ENABLE(DFG_JIT)
-#include "DFGSafepoint.h"
+#include "IntendedStructureChain.h"
+#include <wtf/Vector.h>
namespace JSC { namespace DFG {
-class Graph;
-
-class GraphSafepoint {
+class DesiredStructureChains {
public:
- GraphSafepoint(Graph&, Safepoint::Result&);
- ~GraphSafepoint();
+ DesiredStructureChains();
+ ~DesiredStructureChains();
+
+ void addLazily(PassRefPtr<IntendedStructureChain> chain)
+ {
+ m_vector.append(chain);
+ }
+ bool areStillValid() const;
private:
- Safepoint m_safepoint;
+ Vector<RefPtr<IntendedStructureChain>> m_vector;
};
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
-#endif // DFGGraphSafepoint_h
+#endif // DFGDesiredStructureChains_h
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp b/Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp
index 91134ccb8..0cfa00f6f 100644
--- a/Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,13 +24,13 @@
*/
#include "config.h"
-#include "DFGDesiredTransitions.h"
#if ENABLE(DFG_JIT)
+#include "DFGDesiredTransitions.h"
+
#include "CodeBlock.h"
#include "DFGCommonData.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
@@ -51,13 +51,6 @@ void DesiredTransition::reallyAdd(VM& vm, CommonData* common)
m_oldStructure, m_newStructure));
}
-void DesiredTransition::visitChildren(SlotVisitor& visitor)
-{
- visitor.appendUnbarrieredPointer(&m_codeOriginOwner);
- visitor.appendUnbarrieredPointer(&m_oldStructure);
- visitor.appendUnbarrieredPointer(&m_newStructure);
-}
-
DesiredTransitions::DesiredTransitions()
{
}
@@ -77,12 +70,6 @@ void DesiredTransitions::reallyAdd(VM& vm, CommonData* common)
m_transitions[i].reallyAdd(vm, common);
}
-void DesiredTransitions::visitChildren(SlotVisitor& visitor)
-{
- for (unsigned i = 0; i < m_transitions.size(); i++)
- m_transitions[i].visitChildren(visitor);
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredTransitions.h b/Source/JavaScriptCore/dfg/DFGDesiredTransitions.h
index addaf3ecd..246a81062 100644
--- a/Source/JavaScriptCore/dfg/DFGDesiredTransitions.h
+++ b/Source/JavaScriptCore/dfg/DFGDesiredTransitions.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,7 +34,6 @@ namespace JSC {
class CodeBlock;
class ScriptExecutable;
-class SlotVisitor;
class Structure;
class VM;
@@ -47,8 +46,6 @@ public:
DesiredTransition(CodeBlock*, ScriptExecutable*, Structure*, Structure*);
void reallyAdd(VM&, CommonData*);
-
- void visitChildren(SlotVisitor&);
private:
CodeBlock* m_codeBlock;
@@ -64,7 +61,6 @@ public:
void addLazily(CodeBlock*, ScriptExecutable*, Structure*, Structure*);
void reallyAdd(VM&, CommonData*);
- void visitChildren(SlotVisitor&);
private:
Vector<DesiredTransition> m_transitions;
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp b/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp
index 6bea70420..80400cb26 100644
--- a/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,14 +30,13 @@
#include "ArrayBufferNeuteringWatchpoint.h"
#include "CodeBlock.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
void ArrayBufferViewWatchpointAdaptor::add(
- CodeBlock* codeBlock, JSArrayBufferView* view, CommonData& common)
+ CodeBlock* codeBlock, JSArrayBufferView* view, Watchpoint* watchpoint)
{
- Watchpoint* watchpoint = common.watchpoints.add(codeBlock);
ArrayBufferNeuteringWatchpoint* neuteringWatchpoint =
ArrayBufferNeuteringWatchpoint::create(*codeBlock->vm());
neuteringWatchpoint->set()->add(watchpoint);
@@ -45,26 +44,6 @@ void ArrayBufferViewWatchpointAdaptor::add(
codeBlock->vm()->heap.addReference(neuteringWatchpoint, view->buffer());
}
-void InferredValueAdaptor::add(
- CodeBlock* codeBlock, InferredValue* inferredValue, CommonData& common)
-{
- codeBlock->addConstant(inferredValue); // For common users, it doesn't really matter if it's weak or not. If references to it go away, we go away, too.
- inferredValue->add(common.watchpoints.add(codeBlock));
-}
-
-void AdaptiveStructureWatchpointAdaptor::add(
- CodeBlock* codeBlock, const ObjectPropertyCondition& key, CommonData& common)
-{
- switch (key.kind()) {
- case PropertyCondition::Equivalence:
- common.adaptiveInferredPropertyValueWatchpoints.add(key, codeBlock)->install();
- break;
- default:
- common.adaptiveStructureWatchpoints.add(key, codeBlock)->install();
- break;
- }
-}
-
DesiredWatchpoints::DesiredWatchpoints() { }
DesiredWatchpoints::~DesiredWatchpoints() { }
@@ -78,55 +57,33 @@ void DesiredWatchpoints::addLazily(InlineWatchpointSet& set)
m_inlineSets.addLazily(&set);
}
-void DesiredWatchpoints::addLazily(InferredValue* inferredValue)
-{
- m_inferredValues.addLazily(inferredValue);
-}
-
void DesiredWatchpoints::addLazily(JSArrayBufferView* view)
{
m_bufferViews.addLazily(view);
}
-void DesiredWatchpoints::addLazily(const ObjectPropertyCondition& key)
+void DesiredWatchpoints::addLazily(CodeOrigin codeOrigin, ExitKind exitKind, WatchpointSet* set)
{
- m_adaptiveStructureSets.addLazily(key);
+ m_sets.addLazily(codeOrigin, exitKind, set);
}
-bool DesiredWatchpoints::consider(Structure* structure)
+void DesiredWatchpoints::addLazily(CodeOrigin codeOrigin, ExitKind exitKind, InlineWatchpointSet& set)
{
- if (!structure->dfgShouldWatch())
- return false;
- addLazily(structure->transitionWatchpointSet());
- return true;
+ m_inlineSets.addLazily(codeOrigin, exitKind, &set);
}
void DesiredWatchpoints::reallyAdd(CodeBlock* codeBlock, CommonData& commonData)
{
m_sets.reallyAdd(codeBlock, commonData);
m_inlineSets.reallyAdd(codeBlock, commonData);
- m_inferredValues.reallyAdd(codeBlock, commonData);
m_bufferViews.reallyAdd(codeBlock, commonData);
- m_adaptiveStructureSets.reallyAdd(codeBlock, commonData);
}
bool DesiredWatchpoints::areStillValid() const
{
return m_sets.areStillValid()
&& m_inlineSets.areStillValid()
- && m_inferredValues.areStillValid()
- && m_bufferViews.areStillValid()
- && m_adaptiveStructureSets.areStillValid();
-}
-
-void DesiredWatchpoints::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- out.print("Desired watchpoints:\n");
- out.print(" Watchpoint sets: ", inContext(m_sets, context), "\n");
- out.print(" Inline watchpoint sets: ", inContext(m_inlineSets, context), "\n");
- out.print(" Inferred values: ", inContext(m_inferredValues, context), "\n");
- out.print(" Buffer views: ", inContext(m_bufferViews, context), "\n");
- out.print(" Object property conditions: ", inContext(m_adaptiveStructureSets, context), "\n");
+ && m_bufferViews.areStillValid();
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h b/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h
index bc86f43cf..64e88c764 100644
--- a/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h
+++ b/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,75 +26,66 @@
#ifndef DFGDesiredWatchpoints_h
#define DFGDesiredWatchpoints_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "CodeOrigin.h"
#include "DFGCommonData.h"
-#include "InferredValue.h"
#include "JSArrayBufferView.h"
-#include "ObjectPropertyCondition.h"
#include "Watchpoint.h"
-#include <wtf/CommaPrinter.h>
+#include <wtf/HashMap.h>
#include <wtf/HashSet.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/Vector.h>
namespace JSC { namespace DFG {
-class Graph;
-
-template<typename T>
-struct SetPointerAdaptor {
- static void add(CodeBlock* codeBlock, T set, CommonData& common)
+template<typename WatchpointSetType>
+struct WatchpointForGenericWatchpointSet {
+ WatchpointForGenericWatchpointSet()
+ : m_exitKind(ExitKindUnset)
+ , m_set(0)
{
- return set->add(common.watchpoints.add(codeBlock));
}
- static bool hasBeenInvalidated(T set) { return set->hasBeenInvalidated(); }
- static void dumpInContext(PrintStream& out, T set, DumpContext*)
+
+ WatchpointForGenericWatchpointSet(
+ CodeOrigin codeOrigin, ExitKind exitKind, WatchpointSetType* set)
+ : m_codeOrigin(codeOrigin)
+ , m_exitKind(exitKind)
+ , m_set(set)
{
- out.print(RawPointer(set));
}
+
+ CodeOrigin m_codeOrigin;
+ ExitKind m_exitKind;
+ WatchpointSetType* m_set;
};
-struct InferredValueAdaptor {
- static void add(CodeBlock*, InferredValue*, CommonData&);
- static bool hasBeenInvalidated(InferredValue* inferredValue)
- {
- return inferredValue->hasBeenInvalidated();
- }
- static void dumpInContext(PrintStream& out, InferredValue* inferredValue, DumpContext*)
+template<typename T>
+struct GenericSetAdaptor {
+ static void add(CodeBlock*, T* set, Watchpoint* watchpoint)
{
- out.print(RawPointer(inferredValue));
+ return set->add(watchpoint);
}
+ static bool hasBeenInvalidated(T* set) { return set->hasBeenInvalidated(); }
};
struct ArrayBufferViewWatchpointAdaptor {
- static void add(CodeBlock*, JSArrayBufferView*, CommonData&);
+ static void add(CodeBlock*, JSArrayBufferView*, Watchpoint*);
static bool hasBeenInvalidated(JSArrayBufferView* view)
{
- return !view->length();
- }
- static void dumpInContext(PrintStream& out, JSArrayBufferView* view, DumpContext* context)
- {
- out.print(inContext(JSValue(view), context));
- }
-};
-
-struct AdaptiveStructureWatchpointAdaptor {
- static void add(CodeBlock*, const ObjectPropertyCondition&, CommonData&);
- static bool hasBeenInvalidated(const ObjectPropertyCondition& key)
- {
- return !key.isWatchable();
- }
- static void dumpInContext(
- PrintStream& out, const ObjectPropertyCondition& key, DumpContext* context)
- {
- out.print(inContext(key, context));
+ bool result = !view->length();
+ WTF::loadLoadFence();
+ return result;
}
};
-template<typename WatchpointSetType, typename Adaptor = SetPointerAdaptor<WatchpointSetType>>
+template<typename WatchpointSetType, typename Adaptor = GenericSetAdaptor<WatchpointSetType>>
class GenericDesiredWatchpoints {
+ WTF_MAKE_NONCOPYABLE(GenericDesiredWatchpoints);
#if !ASSERT_DISABLED
- typedef HashMap<WatchpointSetType, bool> StateMap;
+ typedef HashMap<WatchpointSetType*, bool> StateMap;
#endif
public:
GenericDesiredWatchpoints()
@@ -102,47 +93,95 @@ public:
{
}
- void addLazily(const WatchpointSetType& set)
+ void addLazily(WatchpointSetType* set)
{
m_sets.add(set);
}
+ void addLazily(CodeOrigin codeOrigin, ExitKind exitKind, WatchpointSetType* set)
+ {
+ m_profiledWatchpoints.append(
+ WatchpointForGenericWatchpointSet<WatchpointSetType>(codeOrigin, exitKind, set));
+ }
+
void reallyAdd(CodeBlock* codeBlock, CommonData& common)
{
RELEASE_ASSERT(!m_reallyAdded);
- for (auto& set : m_sets)
- Adaptor::add(codeBlock, set, common);
+ typename HashSet<WatchpointSetType*>::iterator iter = m_sets.begin();
+ typename HashSet<WatchpointSetType*>::iterator end = m_sets.end();
+ for (; iter != end; ++iter) {
+ common.watchpoints.append(CodeBlockJettisoningWatchpoint(codeBlock));
+ Adaptor::add(codeBlock, *iter, &common.watchpoints.last());
+ }
+
+ for (unsigned i = m_profiledWatchpoints.size(); i--;) {
+ WatchpointForGenericWatchpointSet<WatchpointSetType> watchpoint =
+ m_profiledWatchpoints[i];
+ common.profiledWatchpoints.append(
+ ProfiledCodeBlockJettisoningWatchpoint(watchpoint.m_codeOrigin, watchpoint.m_exitKind, codeBlock));
+ Adaptor::add(codeBlock, watchpoint.m_set, &common.profiledWatchpoints.last());
+ }
m_reallyAdded = true;
}
bool areStillValid() const
{
- for (auto& set : m_sets) {
- if (Adaptor::hasBeenInvalidated(set))
+ typename HashSet<WatchpointSetType*>::iterator iter = m_sets.begin();
+ typename HashSet<WatchpointSetType*>::iterator end = m_sets.end();
+ for (; iter != end; ++iter) {
+ if (Adaptor::hasBeenInvalidated(*iter))
+ return false;
+ }
+
+ for (unsigned i = m_profiledWatchpoints.size(); i--;) {
+ if (Adaptor::hasBeenInvalidated(m_profiledWatchpoints[i].m_set))
return false;
}
return true;
}
- bool isWatched(const WatchpointSetType& set) const
+#if ASSERT_DISABLED
+ bool isStillValid(WatchpointSetType* set)
{
- return m_sets.contains(set);
+ return !Adaptor::hasBeenInvalidated(set);
}
-
- void dumpInContext(PrintStream& out, DumpContext* context) const
+
+ bool shouldAssumeMixedState(WatchpointSetType*)
{
- CommaPrinter comma;
- for (const WatchpointSetType& entry : m_sets) {
- out.print(comma);
- Adaptor::dumpInContext(out, entry, context);
- }
+ return true;
+ }
+#else
+ bool isStillValid(WatchpointSetType* set)
+ {
+ bool result = !Adaptor::hasBeenInvalidated(set);
+ m_firstKnownState.add(set, result);
+ return result;
+ }
+
+ bool shouldAssumeMixedState(WatchpointSetType* set)
+ {
+ typename StateMap::iterator iter = m_firstKnownState.find(set);
+ if (iter == m_firstKnownState.end())
+ return false;
+
+ return iter->value != !Adaptor::hasBeenInvalidated(set);
+ }
+#endif
+
+ bool isValidOrMixed(WatchpointSetType* set)
+ {
+ return isStillValid(set) || shouldAssumeMixedState(set);
}
private:
- HashSet<WatchpointSetType> m_sets;
+ Vector<WatchpointForGenericWatchpointSet<WatchpointSetType>> m_profiledWatchpoints;
+ HashSet<WatchpointSetType*> m_sets;
+#if !ASSERT_DISABLED
+ StateMap m_firstKnownState;
+#endif
bool m_reallyAdded;
};
@@ -153,49 +192,55 @@ public:
void addLazily(WatchpointSet*);
void addLazily(InlineWatchpointSet&);
- void addLazily(InferredValue*);
void addLazily(JSArrayBufferView*);
-
- // It's recommended that you don't call this directly. Use Graph::watchCondition(), which does
- // the required GC magic as well as some other bookkeeping.
- void addLazily(const ObjectPropertyCondition&);
-
- bool consider(Structure*);
+ void addLazily(CodeOrigin, ExitKind, WatchpointSet*);
+ void addLazily(CodeOrigin, ExitKind, InlineWatchpointSet&);
void reallyAdd(CodeBlock*, CommonData&);
bool areStillValid() const;
- bool isWatched(WatchpointSet* set)
+ bool isStillValid(WatchpointSet* set)
{
- return m_sets.isWatched(set);
+ return m_sets.isStillValid(set);
}
- bool isWatched(InlineWatchpointSet& set)
+ bool isStillValid(InlineWatchpointSet& set)
{
- return m_inlineSets.isWatched(&set);
+ return m_inlineSets.isStillValid(&set);
}
- bool isWatched(InferredValue* inferredValue)
+ bool isStillValid(JSArrayBufferView* view)
{
- return m_inferredValues.isWatched(inferredValue);
+ return m_bufferViews.isStillValid(view);
}
- bool isWatched(JSArrayBufferView* view)
+ bool shouldAssumeMixedState(WatchpointSet* set)
{
- return m_bufferViews.isWatched(view);
+ return m_sets.shouldAssumeMixedState(set);
}
- bool isWatched(const ObjectPropertyCondition& key)
+ bool shouldAssumeMixedState(InlineWatchpointSet& set)
{
- return m_adaptiveStructureSets.isWatched(key);
+ return m_inlineSets.shouldAssumeMixedState(&set);
+ }
+ bool shouldAssumeMixedState(JSArrayBufferView* view)
+ {
+ return m_bufferViews.shouldAssumeMixedState(view);
+ }
+ bool isValidOrMixed(WatchpointSet* set)
+ {
+ return m_sets.isValidOrMixed(set);
+ }
+ bool isValidOrMixed(InlineWatchpointSet& set)
+ {
+ return m_inlineSets.isValidOrMixed(&set);
+ }
+ bool isValidOrMixed(JSArrayBufferView* view)
+ {
+ return m_bufferViews.isValidOrMixed(view);
}
-
- void dumpInContext(PrintStream&, DumpContext*) const;
- void dump(PrintStream&) const;
private:
- GenericDesiredWatchpoints<WatchpointSet*> m_sets;
- GenericDesiredWatchpoints<InlineWatchpointSet*> m_inlineSets;
- GenericDesiredWatchpoints<InferredValue*, InferredValueAdaptor> m_inferredValues;
- GenericDesiredWatchpoints<JSArrayBufferView*, ArrayBufferViewWatchpointAdaptor> m_bufferViews;
- GenericDesiredWatchpoints<ObjectPropertyCondition, AdaptiveStructureWatchpointAdaptor> m_adaptiveStructureSets;
+ GenericDesiredWatchpoints<WatchpointSet> m_sets;
+ GenericDesiredWatchpoints<InlineWatchpointSet> m_inlineSets;
+ GenericDesiredWatchpoints<JSArrayBufferView, ArrayBufferViewWatchpointAdaptor> m_bufferViews;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp b/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp
index 0df960651..a8376ea8a 100644
--- a/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,21 +24,16 @@
*/
#include "config.h"
-#include "DFGDesiredWeakReferences.h"
#if ENABLE(DFG_JIT)
+#include "DFGDesiredWeakReferences.h"
+
#include "CodeBlock.h"
#include "DFGCommonData.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
-DesiredWeakReferences::DesiredWeakReferences()
- : m_codeBlock(nullptr)
-{
-}
-
DesiredWeakReferences::DesiredWeakReferences(CodeBlock* codeBlock)
: m_codeBlock(codeBlock)
{
@@ -50,40 +45,17 @@ DesiredWeakReferences::~DesiredWeakReferences()
void DesiredWeakReferences::addLazily(JSCell* cell)
{
- if (cell)
- m_references.add(cell);
-}
-
-void DesiredWeakReferences::addLazily(JSValue value)
-{
- if (value.isCell())
- addLazily(value.asCell());
-}
-
-bool DesiredWeakReferences::contains(JSCell* cell)
-{
- return m_references.contains(cell);
+ m_references.append(cell);
}
void DesiredWeakReferences::reallyAdd(VM& vm, CommonData* common)
{
- for (JSCell* target : m_references) {
- if (Structure* structure = jsDynamicCast<Structure*>(target)) {
- common->weakStructureReferences.append(
- WriteBarrier<Structure>(vm, m_codeBlock->ownerExecutable(), structure));
- } else {
- common->weakReferences.append(
- WriteBarrier<JSCell>(vm, m_codeBlock->ownerExecutable(), target));
- }
+ for (unsigned i = 0; i < m_references.size(); i++) {
+ JSCell* target = m_references[i];
+ common->weakReferences.append(WriteBarrier<JSCell>(vm, m_codeBlock->ownerExecutable(), target));
}
}
-void DesiredWeakReferences::visitChildren(SlotVisitor& visitor)
-{
- for (JSCell* target : m_references)
- visitor.appendUnbarrieredPointer(&target);
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h b/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h
index 303b8df2a..981e752ea 100644
--- a/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h
+++ b/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,7 +26,7 @@
#ifndef DFGDesiredWeakReferences_h
#define DFGDesiredWeakReferences_h
-#include <wtf/HashSet.h>
+#include <wtf/Vector.h>
#if ENABLE(DFG_JIT)
@@ -34,8 +34,6 @@ namespace JSC {
class CodeBlock;
class JSCell;
-class JSValue;
-class SlotVisitor;
class VM;
namespace DFG {
@@ -44,21 +42,15 @@ class CommonData;
class DesiredWeakReferences {
public:
- DesiredWeakReferences();
DesiredWeakReferences(CodeBlock*);
~DesiredWeakReferences();
void addLazily(JSCell*);
- void addLazily(JSValue);
- bool contains(JSCell*);
-
void reallyAdd(VM&, CommonData*);
-
- void visitChildren(SlotVisitor&);
private:
CodeBlock* m_codeBlock;
- HashSet<JSCell*> m_references;
+ Vector<JSCell*> m_references;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.h b/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.cpp
index 352759e95..bf1a24375 100644
--- a/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,29 +23,65 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGStoreBarrierInsertionPhase_h
-#define DFGStoreBarrierInsertionPhase_h
+#include "config.h"
#if ENABLE(DFG_JIT)
+#include "DFGDesiredWriteBarriers.h"
+
+#include "CodeBlock.h"
+#include "JSCJSValueInlines.h"
+
namespace JSC { namespace DFG {
-class Graph;
+DesiredWriteBarrier::DesiredWriteBarrier(Type type, CodeBlock* codeBlock, unsigned index, JSCell* owner)
+ : m_owner(owner)
+ , m_type(type)
+ , m_codeBlock(codeBlock)
+{
+ m_which.index = index;
+}
-// Inserts store barriers in a block-local manner without consulting the abstract interpreter.
-// Uses a simple epoch-based analysis to avoid inserting redundant barriers. This phase requires
-// that we are not in SSA.
-bool performFastStoreBarrierInsertion(Graph&);
+DesiredWriteBarrier::DesiredWriteBarrier(Type type, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSCell* owner)
+ : m_owner(owner)
+ , m_type(type)
+ , m_codeBlock(codeBlock)
+{
+ m_which.inlineCallFrame = inlineCallFrame;
+}
-// Inserts store barriers using a global analysis and consults the abstract interpreter. Uses a
-// simple epoch-based analysis to avoid inserting redundant barriers, but only propagates "same
-// epoch as current" property from one block to the next. This phase requires SSA. This phase
-// also requires having valid AI and liveness.
-bool performGlobalStoreBarrierInsertion(Graph&);
+void DesiredWriteBarrier::trigger(VM& vm)
+{
+ switch (m_type) {
+ case ConstantType: {
+ WriteBarrier<Unknown>& barrier = m_codeBlock->constants()[m_which.index];
+ barrier.set(vm, m_owner, barrier.get());
+ return;
+ }
-} } // namespace JSC::DFG
+ case InlineCallFrameExecutableType: {
+ InlineCallFrame* inlineCallFrame = m_which.inlineCallFrame;
+ WriteBarrier<ScriptExecutable>& executable = inlineCallFrame->executable;
+ executable.set(vm, m_owner, executable.get());
+ return;
+ } }
+ RELEASE_ASSERT_NOT_REACHED();
+}
-#endif // ENABLE(DFG_JIT)
+DesiredWriteBarriers::DesiredWriteBarriers()
+{
+}
-#endif // DFGStoreBarrierInsertionPhase_h
+DesiredWriteBarriers::~DesiredWriteBarriers()
+{
+}
+void DesiredWriteBarriers::trigger(VM& vm)
+{
+ for (unsigned i = 0; i < m_barriers.size(); i++)
+ m_barriers[i].trigger(vm);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.h b/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.h
new file mode 100644
index 000000000..cbbb2cb5e
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGDesiredWriteBarriers_h
+#define DFGDesiredWriteBarriers_h
+
+#include "WriteBarrier.h"
+#include <wtf/Vector.h>
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC {
+
+class JSFunction;
+class ScriptExecutable;
+class VM;
+struct InlineCallFrame;
+
+namespace DFG {
+
+class DesiredWriteBarrier {
+public:
+ enum Type {
+ ConstantType,
+ InlineCallFrameExecutableType,
+ };
+ DesiredWriteBarrier(Type, CodeBlock*, unsigned index, JSCell* owner);
+ DesiredWriteBarrier(Type, CodeBlock*, InlineCallFrame*, JSCell* owner);
+
+ void trigger(VM&);
+
+private:
+ JSCell* m_owner;
+ Type m_type;
+ CodeBlock* m_codeBlock;
+ union {
+ unsigned index;
+ InlineCallFrame* inlineCallFrame;
+ } m_which;
+};
+
+class DesiredWriteBarriers {
+public:
+ DesiredWriteBarriers();
+ ~DesiredWriteBarriers();
+
+ DesiredWriteBarrier& add(DesiredWriteBarrier::Type type, CodeBlock* codeBlock, unsigned index, JSCell* owner)
+ {
+ m_barriers.append(DesiredWriteBarrier(type, codeBlock, index, owner));
+ return m_barriers.last();
+ }
+ DesiredWriteBarrier& add(DesiredWriteBarrier::Type type, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSCell* owner)
+ {
+ m_barriers.append(DesiredWriteBarrier(type, codeBlock, inlineCallFrame, owner));
+ return m_barriers.last();
+ }
+
+ void trigger(VM&);
+
+private:
+ Vector<DesiredWriteBarrier> m_barriers;
+};
+
+inline void initializeLazyWriteBarrierForInlineCallFrameExecutable(DesiredWriteBarriers& barriers, WriteBarrier<ScriptExecutable>& barrier, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSCell* owner, ScriptExecutable* value)
+{
+ DesiredWriteBarrier& desiredBarrier = barriers.add(DesiredWriteBarrier::InlineCallFrameExecutableType, codeBlock, inlineCallFrame, owner);
+ barrier = WriteBarrier<ScriptExecutable>(desiredBarrier, value);
+}
+
+inline void initializeLazyWriteBarrierForConstant(DesiredWriteBarriers& barriers, WriteBarrier<Unknown>& barrier, CodeBlock* codeBlock, unsigned index, JSCell* owner, JSValue value)
+{
+ DesiredWriteBarrier& desiredBarrier = barriers.add(DesiredWriteBarrier::ConstantType, codeBlock, index, owner);
+ barrier = WriteBarrier<Unknown>(desiredBarrier, value);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGDesiredWriteBarriers_h
diff --git a/Source/JavaScriptCore/dfg/DFGDisassembler.cpp b/Source/JavaScriptCore/dfg/DFGDisassembler.cpp
index e221c4c4f..0a06c02f5 100644
--- a/Source/JavaScriptCore/dfg/DFGDisassembler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDisassembler.cpp
@@ -26,14 +26,11 @@
#include "config.h"
#include "DFGDisassembler.h"
-#if ENABLE(DFG_JIT)
+#if ENABLE(DFG_JIT) && ENABLE(DISASSEMBLER)
#include "CodeBlockWithJITType.h"
#include "DFGGraph.h"
#include "DFGJITCode.h"
-#include "JSCInlines.h"
-#include "LinkBuffer.h"
-#include "ProfilerDatabase.h"
#include <wtf/StdLibExtras.h>
namespace JSC { namespace DFG {
@@ -112,6 +109,8 @@ Vector<Disassembler::DumpedOp> Disassembler::createDumpList(LinkBuffer& linkBuff
append(result, out, previousOrigin);
Node* lastNodeForDisassembly = block->at(0);
for (size_t i = 0; i < block->size(); ++i) {
+ if (!block->at(i)->willHaveCodeGenOrOSR() && !Options::showAllDFGNodes())
+ continue;
MacroAssembler::Label currentLabel;
HashMap<Node*, MacroAssembler::Label>::iterator iter = m_labelForNode.find(block->at(i));
if (iter != m_labelForNode.end())
@@ -128,10 +127,10 @@ Vector<Disassembler::DumpedOp> Disassembler::createDumpList(LinkBuffer& linkBuff
}
dumpDisassembly(out, disassemblyPrefix, linkBuffer, previousLabel, currentLabel, lastNodeForDisassembly);
append(result, out, previousOrigin);
- previousOrigin = block->at(i)->origin.semantic;
+ previousOrigin = block->at(i)->codeOrigin;
if (m_graph.dumpCodeOrigin(out, prefix, lastNode, block->at(i), &m_dumpContext)) {
append(result, out, previousOrigin);
- previousOrigin = block->at(i)->origin.semantic;
+ previousOrigin = block->at(i)->codeOrigin;
}
m_graph.dump(out, prefix, block->at(i), &m_dumpContext);
lastNode = block->at(i);
@@ -173,4 +172,4 @@ void Disassembler::dumpDisassembly(PrintStream& out, const char* prefix, LinkBuf
} } // namespace JSC::DFG
-#endif // ENABLE(DFG_JIT)
+#endif // ENABLE(DFG_JIT) && ENABLE(DISASSEMBLER)
diff --git a/Source/JavaScriptCore/dfg/DFGDisassembler.h b/Source/JavaScriptCore/dfg/DFGDisassembler.h
index 7b31946f9..58163cb59 100644
--- a/Source/JavaScriptCore/dfg/DFGDisassembler.h
+++ b/Source/JavaScriptCore/dfg/DFGDisassembler.h
@@ -26,25 +26,24 @@
#ifndef DFGDisassembler_h
#define DFGDisassembler_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include "CodeOrigin.h"
#include "DFGCommon.h"
#include "DumpContext.h"
+#include "LinkBuffer.h"
#include "MacroAssembler.h"
-#include "ProfilerCompilation.h"
#include <wtf/HashMap.h>
#include <wtf/StringPrintStream.h>
#include <wtf/Vector.h>
-namespace JSC {
-
-class LinkBuffer;
-
-namespace DFG {
+namespace JSC { namespace DFG {
class Graph;
+#if ENABLE(DISASSEMBLER)
+
class Disassembler {
WTF_MAKE_FAST_ALLOCATED;
public:
@@ -100,6 +99,25 @@ private:
MacroAssembler::Label m_endOfCode;
};
+#else // ENABLE(DISASSEMBLER)
+
+class Disassembler {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ Disassembler(Graph&) { }
+
+ void setStartOfCode(MacroAssembler::Label) { }
+ void setForBlockIndex(BlockIndex, MacroAssembler::Label) { }
+ void setForNode(Node*, MacroAssembler::Label) { }
+ void setEndOfMainPath(MacroAssembler::Label) { }
+ void setEndOfCode(MacroAssembler::Label) { }
+
+ void dump(LinkBuffer&) { }
+ void reportToProfiler(Profiler::Compilation*, LinkBuffer&) { }
+};
+
+#endif // ENABLE(DISASSEMBLER)
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGDoesGC.cpp b/Source/JavaScriptCore/dfg/DFGDoesGC.cpp
deleted file mode 100644
index e470b8dda..000000000
--- a/Source/JavaScriptCore/dfg/DFGDoesGC.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGDoesGC.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGClobberize.h"
-#include "DFGGraph.h"
-#include "DFGNode.h"
-#include "Operations.h"
-
-namespace JSC { namespace DFG {
-
-bool doesGC(Graph& graph, Node* node)
-{
- if (clobbersHeap(graph, node))
- return true;
-
- // Now consider nodes that don't clobber the world but that still may GC. This includes all
- // nodes. By convention we put world-clobbering nodes in the block of "false" cases but we can
- // put them anywhere.
- switch (node->op()) {
- case JSConstant:
- case DoubleConstant:
- case Int52Constant:
- case Identity:
- case GetCallee:
- case GetArgumentCount:
- case GetLocal:
- case SetLocal:
- case MovHint:
- case ZombieHint:
- case Phantom:
- case Upsilon:
- case Phi:
- case Flush:
- case PhantomLocal:
- case GetLocalUnlinked:
- case SetArgument:
- case BitAnd:
- case BitOr:
- case BitXor:
- case BitLShift:
- case BitRShift:
- case BitURShift:
- case ValueToInt32:
- case UInt32ToNumber:
- case DoubleAsInt32:
- case ArithAdd:
- case ArithClz32:
- case ArithSub:
- case ArithNegate:
- case ArithMul:
- case ArithIMul:
- case ArithDiv:
- case ArithMod:
- case ArithAbs:
- case ArithMin:
- case ArithMax:
- case ArithPow:
- case ArithSqrt:
- case ArithRound:
- case ArithFRound:
- case ArithSin:
- case ArithCos:
- case ArithLog:
- case ValueAdd:
- case GetById:
- case GetByIdFlush:
- case PutById:
- case PutByIdFlush:
- case PutByIdDirect:
- case CheckStructure:
- case GetExecutable:
- case GetButterfly:
- case CheckArray:
- case GetScope:
- case SkipScope:
- case GetClosureVar:
- case PutClosureVar:
- case GetGlobalVar:
- case PutGlobalVar:
- case VarInjectionWatchpoint:
- case CheckCell:
- case CheckNotEmpty:
- case CheckIdent:
- case RegExpExec:
- case RegExpTest:
- case CompareLess:
- case CompareLessEq:
- case CompareGreater:
- case CompareGreaterEq:
- case CompareEq:
- case CompareEqConstant:
- case CompareStrictEq:
- case Call:
- case Construct:
- case CallVarargs:
- case ConstructVarargs:
- case LoadVarargs:
- case CallForwardVarargs:
- case ConstructForwardVarargs:
- case Breakpoint:
- case ProfileWillCall:
- case ProfileDidCall:
- case ProfileType:
- case ProfileControlFlow:
- case CheckHasInstance:
- case InstanceOf:
- case IsUndefined:
- case IsBoolean:
- case IsNumber:
- case IsString:
- case IsObject:
- case IsObjectOrNull:
- case IsFunction:
- case TypeOf:
- case LogicalNot:
- case ToPrimitive:
- case ToString:
- case CallStringConstructor:
- case In:
- case Jump:
- case Branch:
- case Switch:
- case Return:
- case Throw:
- case CountExecution:
- case ForceOSRExit:
- case CheckWatchdogTimer:
- case StringFromCharCode:
- case Unreachable:
- case ExtractOSREntryLocal:
- case CheckTierUpInLoop:
- case CheckTierUpAtReturn:
- case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter:
- case LoopHint:
- case StoreBarrier:
- case InvalidationPoint:
- case NotifyWrite:
- case CheckInBounds:
- case ConstantStoragePointer:
- case Check:
- case MultiGetByOffset:
- case ValueRep:
- case DoubleRep:
- case Int52Rep:
- case GetGetter:
- case GetSetter:
- case GetByVal:
- case GetIndexedPropertyStorage:
- case GetArrayLength:
- case ArrayPush:
- case ArrayPop:
- case StringCharAt:
- case StringCharCodeAt:
- case GetTypedArrayByteOffset:
- case PutByValDirect:
- case PutByVal:
- case PutByValAlias:
- case PutStructure:
- case GetByOffset:
- case GetGetterSetterByOffset:
- case PutByOffset:
- case GetEnumerableLength:
- case HasGenericProperty:
- case HasStructureProperty:
- case HasIndexedProperty:
- case GetDirectPname:
- case FiatInt52:
- case BooleanToNumber:
- case CheckBadCell:
- case BottomValue:
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- case GetMyArgumentByVal:
- case ForwardVarargs:
- case PutHint:
- case CheckStructureImmediate:
- case PutStack:
- case KillStack:
- case GetStack:
- case GetFromArguments:
- case PutToArguments:
- return false;
-
- case CreateActivation:
- case CreateDirectArguments:
- case CreateScopedArguments:
- case CreateClonedArguments:
- case ToThis:
- case CreateThis:
- case AllocatePropertyStorage:
- case ReallocatePropertyStorage:
- case Arrayify:
- case ArrayifyToStructure:
- case NewObject:
- case NewArray:
- case NewArrayWithSize:
- case NewArrayBuffer:
- case NewRegexp:
- case NewStringObject:
- case MakeRope:
- case NewFunction:
- case NewTypedArray:
- case ThrowReferenceError:
- case GetPropertyEnumerator:
- case GetEnumeratorStructurePname:
- case GetEnumeratorGenericPname:
- case ToIndexString:
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- return true;
-
- case MultiPutByOffset:
- return node->multiPutByOffsetData().reallocatesStorage();
-
- case LastNodeType:
- RELEASE_ASSERT_NOT_REACHED();
- return true;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return true;
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGDoesGC.h b/Source/JavaScriptCore/dfg/DFGDoesGC.h
deleted file mode 100644
index 4503d21f8..000000000
--- a/Source/JavaScriptCore/dfg/DFGDoesGC.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGDoesGC_h
-#define DFGDoesGC_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-struct Node;
-
-bool doesGC(Graph&, Node*);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGDoesGC_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGDominators.cpp b/Source/JavaScriptCore/dfg/DFGDominators.cpp
index 4c67e8b9e..a5ae614b9 100644
--- a/Source/JavaScriptCore/dfg/DFGDominators.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDominators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,11 +28,7 @@
#if ENABLE(DFG_JIT)
-#include "DFGBlockMapInlines.h"
-#include "DFGBlockWorklist.h"
#include "DFGGraph.h"
-#include "DFGNaiveDominators.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
@@ -44,429 +40,91 @@ Dominators::~Dominators()
{
}
-namespace {
-
-// This implements Lengauer and Tarjan's "A Fast Algorithm for Finding Dominators in a Flowgraph"
-// (TOPLAS 1979). It uses the "simple" implementation of LINK and EVAL, which yields an O(n log n)
-// solution. The full paper is linked below; this code attempts to closely follow the algorithm as
-// it is presented in the paper; in particular sections 3 and 4 as well as appendix B.
-// https://www.cs.princeton.edu/courses/archive/fall03/cs528/handouts/a%20fast%20algorithm%20for%20finding.pdf
-//
-// This code is very subtle. The Lengauer-Tarjan algorithm is incredibly deep to begin with. The
-// goal of this code is to follow the code in the paper, however our implementation must deviate
-// from the paper when it comes to recursion. The authors had used recursion to implement DFS, and
-// also to implement the "simple" EVAL. We convert both of those into worklist-based solutions.
-// Finally, once the algorithm gives us immediate dominators, we implement dominance tests by
-// walking the dominator tree and computing pre and post numbers. We then use the range inclusion
-// check trick that was first discovered by Paul F. Dietz in 1982 in "Maintaining order in a linked
-// list" (see http://dl.acm.org/citation.cfm?id=802184).
-
-class LengauerTarjan {
-public:
- LengauerTarjan(Graph& graph)
- : m_graph(graph)
- , m_data(graph)
- {
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
- m_data[block].label = block;
- }
- }
-
- void compute()
- {
- computeDepthFirstPreNumbering(); // Step 1.
- computeSemiDominatorsAndImplicitImmediateDominators(); // Steps 2 and 3.
- computeExplicitImmediateDominators(); // Step 4.
- }
-
- BasicBlock* immediateDominator(BasicBlock* block)
- {
- return m_data[block].dom;
- }
-
-private:
- void computeDepthFirstPreNumbering()
- {
- // Use a block worklist that also tracks the index inside the successor list. This is
- // necessary for ensuring that we don't attempt to visit a successor until the previous
- // successors that we had visited are fully processed. This ends up being revealed in the
- // output of this method because the first time we see an edge to a block, we set the
- // block's parent. So, if we have:
- //
- // A -> B
- // A -> C
- // B -> C
- //
- // And we're processing A, then we want to ensure that if we see A->B first (and hence set
- // B's prenumber before we set C's) then we also end up setting C's parent to B by virtue
- // of not noticing A->C until we're done processing B.
-
- ExtendedBlockWorklist<unsigned> worklist;
- worklist.push(m_graph.block(0), 0);
-
- while (BlockWith<unsigned> item = worklist.pop()) {
- BasicBlock* block = item.block;
- unsigned successorIndex = item.data;
-
- // We initially push with successorIndex = 0 regardless of whether or not we have any
- // successors. This is so that we can assign our prenumber. Subsequently we get pushed
- // with higher successorIndex values, but only if they are in range.
- ASSERT(!successorIndex || successorIndex < block->numSuccessors());
-
- if (!successorIndex) {
- m_data[block].semiNumber = m_blockByPreNumber.size();
- m_blockByPreNumber.append(block);
- }
-
- if (successorIndex < block->numSuccessors()) {
- unsigned nextSuccessorIndex = successorIndex + 1;
- if (nextSuccessorIndex < block->numSuccessors())
- worklist.forcePush(block, nextSuccessorIndex);
-
- BasicBlock* successorBlock = block->successor(successorIndex);
- if (worklist.push(successorBlock, 0))
- m_data[successorBlock].parent = block;
- }
- }
- }
-
- void computeSemiDominatorsAndImplicitImmediateDominators()
- {
- for (unsigned currentPreNumber = m_blockByPreNumber.size(); currentPreNumber-- > 1;) {
- BasicBlock* block = m_blockByPreNumber[currentPreNumber];
- BlockData& blockData = m_data[block];
-
- // Step 2:
- for (BasicBlock* predecessorBlock : block->predecessors) {
- BasicBlock* intermediateBlock = eval(predecessorBlock);
- blockData.semiNumber = std::min(
- m_data[intermediateBlock].semiNumber, blockData.semiNumber);
- }
- unsigned bucketPreNumber = blockData.semiNumber;
- ASSERT(bucketPreNumber <= currentPreNumber);
- m_data[m_blockByPreNumber[bucketPreNumber]].bucket.append(block);
- link(blockData.parent, block);
-
- // Step 3:
- for (BasicBlock* semiDominee : m_data[blockData.parent].bucket) {
- BasicBlock* possibleDominator = eval(semiDominee);
- BlockData& semiDomineeData = m_data[semiDominee];
- ASSERT(m_blockByPreNumber[semiDomineeData.semiNumber] == blockData.parent);
- BlockData& possibleDominatorData = m_data[possibleDominator];
- if (possibleDominatorData.semiNumber < semiDomineeData.semiNumber)
- semiDomineeData.dom = possibleDominator;
- else
- semiDomineeData.dom = blockData.parent;
- }
- m_data[blockData.parent].bucket.clear();
- }
- }
-
- void computeExplicitImmediateDominators()
- {
- for (unsigned currentPreNumber = 1; currentPreNumber < m_blockByPreNumber.size(); ++currentPreNumber) {
- BasicBlock* block = m_blockByPreNumber[currentPreNumber];
- BlockData& blockData = m_data[block];
-
- if (blockData.dom != m_blockByPreNumber[blockData.semiNumber])
- blockData.dom = m_data[blockData.dom].dom;
- }
- }
+void Dominators::compute(Graph& graph)
+{
+ // This implements a naive dominator solver.
- void link(BasicBlock* from, BasicBlock* to)
- {
- m_data[to].ancestor = from;
- }
+ ASSERT(graph.block(0)->predecessors.isEmpty());
- BasicBlock* eval(BasicBlock* block)
- {
- if (!m_data[block].ancestor)
- return block;
-
- compress(block);
- return m_data[block].label;
- }
+ unsigned numBlocks = graph.numBlocks();
- void compress(BasicBlock* initialBlock)
- {
- // This was meant to be a recursive function, but we don't like recursion because we don't
- // want to blow the stack. The original function will call compress() recursively on the
- // ancestor of anything that has an ancestor. So, we populate our worklist with the
- // recursive ancestors of initialBlock. Then we process the list starting from the block
- // that is furthest up the ancestor chain.
-
- BasicBlock* ancestor = m_data[initialBlock].ancestor;
- ASSERT(ancestor);
- if (!m_data[ancestor].ancestor)
- return;
-
- Vector<BasicBlock*, 16> stack;
- for (BasicBlock* block = initialBlock; block; block = m_data[block].ancestor)
- stack.append(block);
-
- // We only care about blocks that have an ancestor that has an ancestor. The last two
- // elements in the stack won't satisfy this property.
- ASSERT(stack.size() >= 2);
- ASSERT(!m_data[stack[stack.size() - 1]].ancestor);
- ASSERT(!m_data[m_data[stack[stack.size() - 2]].ancestor].ancestor);
-
- for (unsigned i = stack.size() - 2; i--;) {
- BasicBlock* block = stack[i];
- BasicBlock*& labelOfBlock = m_data[block].label;
- BasicBlock*& ancestorOfBlock = m_data[block].ancestor;
- ASSERT(ancestorOfBlock);
- ASSERT(m_data[ancestorOfBlock].ancestor);
-
- BasicBlock* labelOfAncestorOfBlock = m_data[ancestorOfBlock].label;
-
- if (m_data[labelOfAncestorOfBlock].semiNumber < m_data[labelOfBlock].semiNumber)
- labelOfBlock = labelOfAncestorOfBlock;
- ancestorOfBlock = m_data[ancestorOfBlock].ancestor;
- }
+ // Allocate storage for the dense dominance matrix.
+ if (numBlocks > m_results.size()) {
+ m_results.grow(numBlocks);
+ for (unsigned i = numBlocks; i--;)
+ m_results[i].resize(numBlocks);
+ m_scratch.resize(numBlocks);
}
- struct BlockData {
- BlockData()
- : parent(nullptr)
- , preNumber(UINT_MAX)
- , semiNumber(UINT_MAX)
- , ancestor(nullptr)
- , label(nullptr)
- , dom(nullptr)
- {
- }
-
- BasicBlock* parent;
- unsigned preNumber;
- unsigned semiNumber;
- BasicBlock* ancestor;
- BasicBlock* label;
- Vector<BasicBlock*> bucket;
- BasicBlock* dom;
- };
-
- Graph& m_graph;
- BlockMap<BlockData> m_data;
- Vector<BasicBlock*> m_blockByPreNumber;
-};
+ // We know that the entry block is only dominated by itself.
+ m_results[0].clearAll();
+ m_results[0].set(0);
-struct ValidationContext {
- ValidationContext(Graph& graph, Dominators& dominators)
- : graph(graph)
- , dominators(dominators)
- {
- }
-
- void reportError(BasicBlock* from, BasicBlock* to, const char* message)
- {
- Error error;
- error.from = from;
- error.to = to;
- error.message = message;
- errors.append(error);
+ // Find all of the valid blocks.
+ m_scratch.clearAll();
+ for (unsigned i = numBlocks; i--;) {
+ if (!graph.block(i))
+ continue;
+ m_scratch.set(i);
}
- void handleErrors()
- {
- if (errors.isEmpty())
- return;
-
- startCrashing();
- dataLog("DFG DOMINATOR VALIDATION FAILED:\n");
- dataLog("\n");
- dataLog("For block domination relationships:\n");
- for (unsigned i = 0; i < errors.size(); ++i) {
- dataLog(
- " ", pointerDump(errors[i].from), " -> ", pointerDump(errors[i].to),
- " (", errors[i].message, ")\n");
- }
- dataLog("\n");
- dataLog("Control flow graph:\n");
- for (BlockIndex blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) {
- BasicBlock* block = graph.block(blockIndex);
- if (!block)
- continue;
- dataLog(" Block #", blockIndex, ": successors = [");
- CommaPrinter comma;
- for (unsigned i = 0; i < block->numSuccessors(); ++i)
- dataLog(comma, *block->successor(i));
- dataLog("], predecessors = [");
- comma = CommaPrinter();
- for (unsigned i = 0; i < block->predecessors.size(); ++i)
- dataLog(comma, *block->predecessors[i]);
- dataLog("]\n");
- }
- dataLog("\n");
- dataLog("Lengauer-Tarjan Dominators:\n");
- dataLog(dominators);
- dataLog("\n");
- dataLog("Naive Dominators:\n");
- naiveDominators.dump(graph, WTF::dataFile());
- dataLog("\n");
- dataLog("Graph at time of failure:\n");
- graph.dump();
- dataLog("\n");
- dataLog("DFG DOMINATOR VALIDATION FAILIED!\n");
- CRASH();
+ // Mark all nodes as dominated by everything.
+ for (unsigned i = numBlocks; i-- > 1;) {
+ if (!graph.block(i) || graph.block(i)->predecessors.isEmpty())
+ m_results[i].clearAll();
+ else
+ m_results[i].set(m_scratch);
}
-
- Graph& graph;
- Dominators& dominators;
- NaiveDominators naiveDominators;
-
- struct Error {
- BasicBlock* from;
- BasicBlock* to;
- const char* message;
- };
-
- Vector<Error> errors;
-};
-
-} // anonymous namespace
-void Dominators::compute(Graph& graph)
-{
- LengauerTarjan lengauerTarjan(graph);
- lengauerTarjan.compute();
+ // Iteratively eliminate nodes that are not dominator.
+ bool changed;
+ do {
+ changed = false;
+ // Prune dominators in all non entry blocks: forward scan.
+ for (unsigned i = 1; i < numBlocks; ++i)
+ changed |= pruneDominators(graph, i);
- m_data = BlockMap<BlockData>(graph);
-
- // From here we want to build a spanning tree with both upward and downward links and we want
- // to do a search over this tree to compute pre and post numbers that can be used for dominance
- // tests.
-
- for (BlockIndex blockIndex = graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = graph.block(blockIndex);
- if (!block)
- continue;
-
- BasicBlock* idomBlock = lengauerTarjan.immediateDominator(block);
- m_data[block].idomParent = idomBlock;
- if (idomBlock)
- m_data[idomBlock].idomKids.append(block);
- }
-
- unsigned nextPreNumber = 0;
- unsigned nextPostNumber = 0;
-
- // Plain stack-based worklist because we are guaranteed to see each block exactly once anyway.
- Vector<BlockWithOrder> worklist;
- worklist.append(BlockWithOrder(graph.block(0), PreOrder));
- while (!worklist.isEmpty()) {
- BlockWithOrder item = worklist.takeLast();
- switch (item.order) {
- case PreOrder:
- m_data[item.block].preNumber = nextPreNumber++;
- worklist.append(BlockWithOrder(item.block, PostOrder));
- for (BasicBlock* kid : m_data[item.block].idomKids)
- worklist.append(BlockWithOrder(kid, PreOrder));
+ if (!changed)
break;
- case PostOrder:
- m_data[item.block].postNumber = nextPostNumber++;
- break;
- }
- }
-
- if (validationEnabled()) {
- // Check our dominator calculation:
- // 1) Check that our range-based ancestry test is the same as a naive ancestry test.
- // 2) Check that our notion of who dominates whom is identical to a naive (not
- // Lengauer-Tarjan) dominator calculation.
-
- ValidationContext context(graph, *this);
- context.naiveDominators.compute(graph);
-
- for (BlockIndex fromBlockIndex = graph.numBlocks(); fromBlockIndex--;) {
- BasicBlock* fromBlock = graph.block(fromBlockIndex);
- if (!fromBlock || m_data[fromBlock].preNumber == UINT_MAX)
- continue;
- for (BlockIndex toBlockIndex = graph.numBlocks(); toBlockIndex--;) {
- BasicBlock* toBlock = graph.block(toBlockIndex);
- if (!toBlock || m_data[toBlock].preNumber == UINT_MAX)
- continue;
-
- if (dominates(fromBlock, toBlock) != naiveDominates(fromBlock, toBlock))
- context.reportError(fromBlock, toBlock, "Range-based domination check is broken");
- if (dominates(fromBlock, toBlock) != context.naiveDominators.dominates(fromBlock, toBlock))
- context.reportError(fromBlock, toBlock, "Lengauer-Tarjan domination is broken");
- }
- }
-
- context.handleErrors();
- }
-}
-BlockSet Dominators::strictDominatorsOf(BasicBlock* to) const
-{
- BlockSet result;
- forAllStrictDominatorsOf(to, BlockAdder(result));
- return result;
+ // Prune dominators in all non entry blocks: backward scan.
+ changed = false;
+ for (unsigned i = numBlocks; i-- > 1;)
+ changed |= pruneDominators(graph, i);
+ } while (changed);
}
-BlockSet Dominators::dominatorsOf(BasicBlock* to) const
+bool Dominators::pruneDominators(Graph& graph, BlockIndex idx)
{
- BlockSet result;
- forAllDominatorsOf(to, BlockAdder(result));
- return result;
-}
+ BasicBlock* block = graph.block(idx);
-BlockSet Dominators::blocksStrictlyDominatedBy(BasicBlock* from) const
-{
- BlockSet result;
- forAllBlocksStrictlyDominatedBy(from, BlockAdder(result));
- return result;
-}
+ if (!block || block->predecessors.isEmpty())
+ return false;
-BlockSet Dominators::blocksDominatedBy(BasicBlock* from) const
-{
- BlockSet result;
- forAllBlocksDominatedBy(from, BlockAdder(result));
- return result;
-}
+ // Find the intersection of dom(preds).
+ m_scratch.set(m_results[block->predecessors[0]->index]);
+ for (unsigned j = block->predecessors.size(); j-- > 1;)
+ m_scratch.filter(m_results[block->predecessors[j]->index]);
-BlockSet Dominators::dominanceFrontierOf(BasicBlock* from) const
-{
- BlockSet result;
- forAllBlocksInDominanceFrontierOfImpl(from, BlockAdder(result));
- return result;
-}
-
-BlockSet Dominators::iteratedDominanceFrontierOf(const BlockList& from) const
-{
- BlockSet result;
- forAllBlocksInIteratedDominanceFrontierOfImpl(from, BlockAdder(result));
- return result;
-}
+ // The block is also dominated by itself.
+ m_scratch.set(idx);
-bool Dominators::naiveDominates(BasicBlock* from, BasicBlock* to) const
-{
- for (BasicBlock* block = to; block; block = m_data[block].idomParent) {
- if (block == from)
- return true;
- }
- return false;
+ return m_results[idx].setAndCheck(m_scratch);
}
-void Dominators::dump(PrintStream& out) const
+void Dominators::dump(Graph& graph, PrintStream& out) const
{
- if (!isValid()) {
- out.print(" Not Valid.\n");
- return;
- }
-
- for (BlockIndex blockIndex = 0; blockIndex < m_data.size(); ++blockIndex) {
- if (m_data[blockIndex].preNumber == UINT_MAX)
+ for (BlockIndex blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = graph.block(blockIndex);
+ if (!block)
continue;
-
- out.print(" Block #", blockIndex, ": idom = ", pointerDump(m_data[blockIndex].idomParent), ", idomKids = [");
- CommaPrinter comma;
- for (unsigned i = 0; i < m_data[blockIndex].idomKids.size(); ++i)
- out.print(comma, *m_data[blockIndex].idomKids[i]);
- out.print("], pre/post = ", m_data[blockIndex].preNumber, "/", m_data[blockIndex].postNumber, "\n");
+ out.print(" Block ", *block, ":");
+ for (BlockIndex otherIndex = 0; otherIndex < graph.numBlocks(); ++otherIndex) {
+ if (!dominates(block->index, otherIndex))
+ continue;
+ out.print(" #", otherIndex);
+ }
+ out.print("\n");
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGDominators.h b/Source/JavaScriptCore/dfg/DFGDominators.h
index e218ba792..c63a84baf 100644
--- a/Source/JavaScriptCore/dfg/DFGDominators.h
+++ b/Source/JavaScriptCore/dfg/DFGDominators.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,13 +26,14 @@
#ifndef DFGDominators_h
#define DFGDominators_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAnalysis.h"
#include "DFGBasicBlock.h"
-#include "DFGBlockMap.h"
-#include "DFGBlockSet.h"
#include "DFGCommon.h"
+#include <wtf/FastBitVector.h>
namespace JSC { namespace DFG {
@@ -43,175 +44,26 @@ public:
Dominators();
~Dominators();
- void compute(Graph&);
+ void compute(Graph& graph);
- bool strictlyDominates(BasicBlock* from, BasicBlock* to) const
+ bool dominates(BlockIndex from, BlockIndex to) const
{
ASSERT(isValid());
- return m_data[to].preNumber > m_data[from].preNumber
- && m_data[to].postNumber < m_data[from].postNumber;
+ return m_results[to].get(from);
}
bool dominates(BasicBlock* from, BasicBlock* to) const
{
- return from == to || strictlyDominates(from, to);
- }
-
- BasicBlock* immediateDominatorOf(BasicBlock* block) const
- {
- return m_data[block].idomParent;
- }
-
- template<typename Functor>
- void forAllStrictDominatorsOf(BasicBlock* to, const Functor& functor) const
- {
- for (BasicBlock* block = m_data[to].idomParent; block; block = m_data[block].idomParent)
- functor(block);
- }
-
- template<typename Functor>
- void forAllDominatorsOf(BasicBlock* to, const Functor& functor) const
- {
- for (BasicBlock* block = to; block; block = m_data[block].idomParent)
- functor(block);
+ return dominates(from->index, to->index);
}
- template<typename Functor>
- void forAllBlocksStrictlyDominatedBy(BasicBlock* from, const Functor& functor) const
- {
- Vector<BasicBlock*, 16> worklist;
- worklist.appendVector(m_data[from].idomKids);
- while (!worklist.isEmpty()) {
- BasicBlock* block = worklist.takeLast();
- functor(block);
- worklist.appendVector(m_data[block].idomKids);
- }
- }
-
- template<typename Functor>
- void forAllBlocksDominatedBy(BasicBlock* from, const Functor& functor) const
- {
- Vector<BasicBlock*, 16> worklist;
- worklist.append(from);
- while (!worklist.isEmpty()) {
- BasicBlock* block = worklist.takeLast();
- functor(block);
- worklist.appendVector(m_data[block].idomKids);
- }
- }
-
- BlockSet strictDominatorsOf(BasicBlock* to) const;
- BlockSet dominatorsOf(BasicBlock* to) const;
- BlockSet blocksStrictlyDominatedBy(BasicBlock* from) const;
- BlockSet blocksDominatedBy(BasicBlock* from) const;
-
- template<typename Functor>
- void forAllBlocksInDominanceFrontierOf(
- BasicBlock* from, const Functor& functor) const
- {
- BlockSet set;
- forAllBlocksInDominanceFrontierOfImpl(
- from,
- [&] (BasicBlock* block) {
- if (set.add(block))
- functor(block);
- });
- }
-
- BlockSet dominanceFrontierOf(BasicBlock* from) const;
-
- template<typename Functor>
- void forAllBlocksInIteratedDominanceFrontierOf(
- const BlockList& from, const Functor& functor)
- {
- forAllBlocksInPrunedIteratedDominanceFrontierOf(
- from,
- [&] (BasicBlock* block) -> bool {
- functor(block);
- return true;
- });
- }
-
- // This is a close relative of forAllBlocksInIteratedDominanceFrontierOf(), which allows the
- // given functor to return false to indicate that we don't wish to consider the given block.
- // Useful for computing pruned SSA form.
- template<typename Functor>
- void forAllBlocksInPrunedIteratedDominanceFrontierOf(
- const BlockList& from, const Functor& functor)
- {
- BlockSet set;
- forAllBlocksInIteratedDominanceFrontierOfImpl(
- from,
- [&] (BasicBlock* block) -> bool {
- if (!set.add(block))
- return false;
- return functor(block);
- });
- }
-
- BlockSet iteratedDominanceFrontierOf(const BlockList& from) const;
-
- void dump(PrintStream&) const;
+ void dump(Graph& graph, PrintStream&) const;
private:
- bool naiveDominates(BasicBlock* from, BasicBlock* to) const;
-
- template<typename Functor>
- void forAllBlocksInDominanceFrontierOfImpl(
- BasicBlock* from, const Functor& functor) const
- {
- // Paraphrasing from http://en.wikipedia.org/wiki/Dominator_(graph_theory):
- // "The dominance frontier of a block 'from' is the set of all blocks 'to' such that
- // 'from' dominates an immediate predecessor of 'to', but 'from' does not strictly
- // dominate 'to'."
- //
- // A useful corner case to remember: a block may be in its own dominance frontier if it has
- // a loop edge to itself, since it dominates itself and so it dominates its own immediate
- // predecessor, and a block never strictly dominates itself.
-
- forAllBlocksDominatedBy(
- from,
- [&] (BasicBlock* block) {
- for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
- BasicBlock* to = block->successor(successorIndex);
- if (!strictlyDominates(from, to))
- functor(to);
- }
- });
- }
-
- template<typename Functor>
- void forAllBlocksInIteratedDominanceFrontierOfImpl(
- const BlockList& from, const Functor& functor) const
- {
- BlockList worklist = from;
- while (!worklist.isEmpty()) {
- BasicBlock* block = worklist.takeLast();
- forAllBlocksInDominanceFrontierOfImpl(
- block,
- [&] (BasicBlock* otherBlock) {
- if (functor(otherBlock))
- worklist.append(otherBlock);
- });
- }
- }
-
- struct BlockData {
- BlockData()
- : idomParent(nullptr)
- , preNumber(UINT_MAX)
- , postNumber(UINT_MAX)
- {
- }
-
- Vector<BasicBlock*> idomKids;
- BasicBlock* idomParent;
-
- unsigned preNumber;
- unsigned postNumber;
- };
+ bool pruneDominators(Graph&, BlockIndex);
- BlockMap<BlockData> m_data;
+ Vector<FastBitVector> m_results; // For each block, the bitvector of blocks that dominate it.
+ FastBitVector m_scratch; // A temporary bitvector with bit for each block. We recycle this to save new/deletes.
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp
index fe407aae6..780ad6c22 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,16 +30,15 @@
#include "JSString.h"
#include "CodeBlock.h"
-#include "DFGFunctionWhitelist.h"
#include "DFGJITCode.h"
#include "DFGPlan.h"
#include "DFGThunks.h"
#include "DFGWorklist.h"
+#include "Debugger.h"
#include "JITCode.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "Options.h"
#include "SamplingTool.h"
-#include "TypeProfilerLog.h"
#include <wtf/Atomics.h>
#if ENABLE(FTL_JIT)
@@ -57,24 +56,32 @@ unsigned getNumCompilations()
#if ENABLE(DFG_JIT)
static CompilationResult compileImpl(
- VM& vm, CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationMode mode,
- unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues,
- PassRefPtr<DeferredCompilationCallback> callback)
+ VM& vm, CodeBlock* codeBlock, CompilationMode mode, unsigned osrEntryBytecodeIndex,
+ const Operands<JSValue>& mustHandleValues,
+ PassRefPtr<DeferredCompilationCallback> callback, Worklist* worklist)
{
SamplingRegion samplingRegion("DFG Compilation (Driver)");
- if (!Options::bytecodeRangeToDFGCompile().isInRange(codeBlock->instructionCount())
- || !FunctionWhitelist::ensureGlobalWhitelist().contains(codeBlock))
- return CompilationFailed;
-
numCompilations++;
ASSERT(codeBlock);
ASSERT(codeBlock->alternative());
ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
- ASSERT(!profiledDFGCodeBlock || profiledDFGCodeBlock->jitType() == JITCode::DFGJIT);
- if (logCompilationChanges(mode))
+ if (!Options::useDFGJIT() || !MacroAssembler::supportsFloatingPoint())
+ return CompilationFailed;
+
+ if (!Options::bytecodeRangeToDFGCompile().isInRange(codeBlock->instructionCount()))
+ return CompilationFailed;
+
+ if (vm.enabledProfiler())
+ return CompilationInvalidated;
+
+ Debugger* debugger = codeBlock->globalObject()->debugger();
+ if (debugger && (debugger->isStepping() || codeBlock->baselineAlternative()->hasDebuggerRequests()))
+ return CompilationInvalidated;
+
+ if (logCompilationChanges())
dataLog("DFG(Driver) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");
// Make sure that any stubs that the DFG is going to use are initialized. We want to
@@ -82,44 +89,42 @@ static CompilationResult compileImpl(
vm.getCTIStub(osrExitGenerationThunkGenerator);
vm.getCTIStub(throwExceptionFromCallSlowPathGenerator);
vm.getCTIStub(linkCallThunkGenerator);
- vm.getCTIStub(linkPolymorphicCallThunkGenerator);
-
- if (vm.typeProfiler())
- vm.typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for DFG compilation."));
+ vm.getCTIStub(linkConstructThunkGenerator);
+ vm.getCTIStub(linkClosureCallThunkGenerator);
+ vm.getCTIStub(virtualCallThunkGenerator);
+ vm.getCTIStub(virtualConstructThunkGenerator);
RefPtr<Plan> plan = adoptRef(
- new Plan(codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues));
+ new Plan(codeBlock, mode, osrEntryBytecodeIndex, mustHandleValues));
- plan->callback = callback;
- if (Options::enableConcurrentJIT()) {
- Worklist* worklist = ensureGlobalWorklistFor(mode);
- if (logCompilationChanges(mode))
+ if (worklist) {
+ plan->callback = callback;
+ if (logCompilationChanges())
dataLog("Deferring DFG compilation of ", *codeBlock, " with queue length ", worklist->queueLength(), ".\n");
worklist->enqueue(plan);
return CompilationDeferred;
}
- plan->compileInThread(*vm.dfgState, 0);
+ plan->compileInThread(*vm.dfgState);
return plan->finalizeWithoutNotifyingCallback();
}
#else // ENABLE(DFG_JIT)
static CompilationResult compileImpl(
- VM&, CodeBlock*, CodeBlock*, CompilationMode, unsigned, const Operands<JSValue>&,
- PassRefPtr<DeferredCompilationCallback>)
+ VM&, CodeBlock*, CompilationMode, unsigned, const Operands<JSValue>&,
+ PassRefPtr<DeferredCompilationCallback>, Worklist*)
{
return CompilationFailed;
}
#endif // ENABLE(DFG_JIT)
CompilationResult compile(
- VM& vm, CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationMode mode,
- unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues,
- PassRefPtr<DeferredCompilationCallback> passedCallback)
+ VM& vm, CodeBlock* codeBlock, CompilationMode mode, unsigned osrEntryBytecodeIndex,
+ const Operands<JSValue>& mustHandleValues,
+ PassRefPtr<DeferredCompilationCallback> passedCallback, Worklist* worklist)
{
RefPtr<DeferredCompilationCallback> callback = passedCallback;
CompilationResult result = compileImpl(
- vm, codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues,
- callback);
+ vm, codeBlock, mode, osrEntryBytecodeIndex, mustHandleValues, callback, worklist);
if (result != CompilationDeferred)
callback->compilationDidComplete(codeBlock, result);
return result;
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.h b/Source/JavaScriptCore/dfg/DFGDriver.h
index a456dc66e..9d43638df 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.h
+++ b/Source/JavaScriptCore/dfg/DFGDriver.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,6 +29,7 @@
#include "CallFrame.h"
#include "DFGCompilationMode.h"
#include "DFGPlan.h"
+#include <wtf/Platform.h>
namespace JSC {
@@ -39,14 +40,16 @@ class VM;
namespace DFG {
+class Worklist;
+
JS_EXPORT_PRIVATE unsigned getNumCompilations();
// If the worklist is non-null, we do a concurrent compile. Otherwise we do a synchronous
// compile. Even if we do a synchronous compile, we call the callback with the result.
CompilationResult compile(
- VM&, CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationMode,
- unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues,
- PassRefPtr<DeferredCompilationCallback>);
+ VM&, CodeBlock*, CompilationMode, unsigned osrEntryBytecodeIndex,
+ const Operands<JSValue>& mustHandleValues,
+ PassRefPtr<DeferredCompilationCallback>, Worklist*);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGEdge.cpp b/Source/JavaScriptCore/dfg/DFGEdge.cpp
index 154401e4d..eafe31faf 100644
--- a/Source/JavaScriptCore/dfg/DFGEdge.cpp
+++ b/Source/JavaScriptCore/dfg/DFGEdge.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,18 +29,17 @@
#if ENABLE(DFG_JIT)
#include "DFGNode.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
void Edge::dump(PrintStream& out) const
{
- if (useKindUnchecked() != UntypedUse) {
- if (!isProved())
+ if (useKind() != UntypedUse) {
+ if (needsCheck())
out.print("Check:");
out.print(useKind(), ":");
}
- if (DFG::doesKill(killStatusUnchecked()))
+ if (doesKill())
out.print("Kill:");
out.print(node());
}
diff --git a/Source/JavaScriptCore/dfg/DFGEdge.h b/Source/JavaScriptCore/dfg/DFGEdge.h
index 3dec1893f..e641b65b7 100644
--- a/Source/JavaScriptCore/dfg/DFGEdge.h
+++ b/Source/JavaScriptCore/dfg/DFGEdge.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGEdge_h
#define DFGEdge_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -115,10 +117,14 @@ public:
{
return proofStatus() == IsProved;
}
+ bool needsCheck() const
+ {
+ return proofStatus() == NeedsCheck;
+ }
bool willNotHaveCheck() const
{
- return isProved() || shouldNotHaveTypeCheck(useKind());
+ return isProved() || useKind() == UntypedUse;
}
bool willHaveCheck() const
{
@@ -147,20 +153,11 @@ public:
bool doesNotKill() const { return !doesKill(); }
bool isSet() const { return !!node(); }
-
- Edge sanitized() const
- {
- Edge result = *this;
-#if USE(JSVALUE64)
- result.m_encodedWord = makeWord(node(), useKindUnchecked(), NeedsCheck, DoesNotKill);
-#else
- result.m_encodedWord = makeWord(useKindUnchecked(), NeedsCheck, DoesNotKill);
-#endif
- return result;
- }
-
+
+ typedef void* Edge::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType*() const { return reinterpret_cast<UnspecifiedBoolType*>(isSet()); }
+
bool operator!() const { return !isSet(); }
- explicit operator bool() const { return isSet(); }
bool operator==(Edge other) const
{
@@ -176,15 +173,6 @@ public:
}
void dump(PrintStream&) const;
-
- unsigned hash() const
- {
-#if USE(JSVALUE64)
- return IntHash<uintptr_t>::hash(m_encodedWord);
-#else
- return PtrHash<Node*>::hash(m_node) + m_encodedWord;
-#endif
- }
private:
friend class AdjacencyList;
@@ -199,13 +187,13 @@ private:
ASSERT((shiftedValue >> shift()) == bitwise_cast<uintptr_t>(node));
ASSERT(useKind >= 0 && useKind < LastUseKind);
ASSERT((static_cast<uintptr_t>(LastUseKind) << 2) <= (static_cast<uintptr_t>(2) << shift()));
- return shiftedValue | (static_cast<uintptr_t>(useKind) << 2) | (DFG::doesKill(killStatus) << 1) | static_cast<uintptr_t>(DFG::isProved(proofStatus));
+ return shiftedValue | (static_cast<uintptr_t>(useKind) << 2) | (DFG::doesKill(killStatus) << 1) | DFG::isProved(proofStatus);
}
#else
static uintptr_t makeWord(UseKind useKind, ProofStatus proofStatus, KillStatus killStatus)
{
- return (static_cast<uintptr_t>(useKind) << 2) | (DFG::doesKill(killStatus) << 1) | static_cast<uintptr_t>(DFG::isProved(proofStatus));
+ return (static_cast<uintptr_t>(useKind) << 2) | (DFG::doesKill(killStatus) << 1) | DFG::isProved(proofStatus);
}
Node* m_node;
diff --git a/Source/JavaScriptCore/dfg/DFGEdgeDominates.h b/Source/JavaScriptCore/dfg/DFGEdgeDominates.h
index d95c79df2..0d514db55 100644
--- a/Source/JavaScriptCore/dfg/DFGEdgeDominates.h
+++ b/Source/JavaScriptCore/dfg/DFGEdgeDominates.h
@@ -26,6 +26,8 @@
#ifndef DFGEdgeDominates_h
#define DFGEdgeDominates_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
@@ -45,10 +47,10 @@ public:
void operator()(Node*, Edge edge)
{
- bool result = m_graph.m_dominators.dominates(edge.node()->owner, m_block);
+ bool result = m_graph.m_dominators.dominates(edge.node()->misc.owner, m_block);
if (verbose) {
dataLog(
- "Checking if ", edge, " in ", *edge.node()->owner,
+ "Checking if ", edge, " in ", *edge.node()->misc.owner,
" dominates ", *m_block, ": ", result, "\n");
}
m_result &= result;
diff --git a/Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h b/Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h
index d5e748f1f..bd8063a0c 100644
--- a/Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h
+++ b/Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h
@@ -26,6 +26,8 @@
#ifndef DFGEdgeUsesStructure_h
#define DFGEdgeUsesStructure_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
diff --git a/Source/JavaScriptCore/dfg/DFGEpoch.cpp b/Source/JavaScriptCore/dfg/DFGEpoch.cpp
deleted file mode 100644
index 7da1deb8c..000000000
--- a/Source/JavaScriptCore/dfg/DFGEpoch.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGEpoch.h"
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-void Epoch::dump(PrintStream& out) const
-{
- if (!*this)
- out.print("none");
- else
- out.print(m_epoch);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGEpoch.h b/Source/JavaScriptCore/dfg/DFGEpoch.h
deleted file mode 100644
index 9865dd7e0..000000000
--- a/Source/JavaScriptCore/dfg/DFGEpoch.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGEpoch_h
-#define DFGEpoch_h
-
-#if ENABLE(DFG_JIT)
-
-#include <wtf/PrintStream.h>
-
-namespace JSC { namespace DFG {
-
-// Utility class for epoch-based analyses.
-
-class Epoch {
-public:
- Epoch()
- : m_epoch(s_none)
- {
- }
-
- static Epoch fromUnsigned(unsigned value)
- {
- Epoch result;
- result.m_epoch = value;
- return result;
- }
-
- unsigned toUnsigned() const
- {
- return m_epoch;
- }
-
- static Epoch first()
- {
- Epoch result;
- result.m_epoch = s_first;
- return result;
- }
-
- bool operator!() const
- {
- return m_epoch == s_none;
- }
-
- Epoch next() const
- {
- Epoch result;
- result.m_epoch = m_epoch + 1;
- return result;
- }
-
- void bump()
- {
- *this = next();
- }
-
- bool operator==(const Epoch& other) const
- {
- return m_epoch == other.m_epoch;
- }
-
- bool operator!=(const Epoch& other) const
- {
- return !(*this == other);
- }
-
- bool operator<(const Epoch& other) const
- {
- return m_epoch < other.m_epoch;
- }
-
- bool operator>(const Epoch& other) const
- {
- return other < *this;
- }
-
- bool operator<=(const Epoch& other) const
- {
- return !(*this > other);
- }
-
- bool operator>=(const Epoch& other) const
- {
- return !(*this < other);
- }
-
- void dump(PrintStream&) const;
-
-private:
- static const unsigned s_none = 0;
- static const unsigned s_first = 1;
-
- unsigned m_epoch;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGEpoch_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp b/Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp
index d2562a8e5..fb00e2021 100644
--- a/Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace JSC { namespace DFG {
FailedFinalizer::FailedFinalizer(Plan& plan)
@@ -41,11 +39,6 @@ FailedFinalizer::~FailedFinalizer()
{
}
-size_t FailedFinalizer::codeSize()
-{
- return 0;
-}
-
bool FailedFinalizer::finalize()
{
return false;
diff --git a/Source/JavaScriptCore/dfg/DFGFailedFinalizer.h b/Source/JavaScriptCore/dfg/DFGFailedFinalizer.h
index 1afbe0dcb..6df5a30ad 100644
--- a/Source/JavaScriptCore/dfg/DFGFailedFinalizer.h
+++ b/Source/JavaScriptCore/dfg/DFGFailedFinalizer.h
@@ -26,6 +26,8 @@
#ifndef DFGFailedFinalizer_h
#define DFGFailedFinalizer_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGFinalizer.h"
@@ -37,7 +39,6 @@ public:
FailedFinalizer(Plan&);
virtual ~FailedFinalizer();
- virtual size_t codeSize() override;
virtual bool finalize() override;
virtual bool finalizeFunction() override;
};
diff --git a/Source/JavaScriptCore/dfg/DFGFiltrationResult.h b/Source/JavaScriptCore/dfg/DFGFiltrationResult.h
index ad0bf4d2e..8c80cea90 100644
--- a/Source/JavaScriptCore/dfg/DFGFiltrationResult.h
+++ b/Source/JavaScriptCore/dfg/DFGFiltrationResult.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,24 +26,14 @@
#ifndef DFGFiltrationResult_h
#define DFGFiltrationResult_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
-// Tells you if an operation that filters type (i.e. does a type check/speculation) will always
-// exit. Formally, this means that the proven type of a value prior to the filter was not
-// bottom (i.e. not "clear" or "SpecEmpty") but becomes bottom as a result of executing the
-// filter.
-//
-// Note that per this definition, a filter will not return Contradiction if the node's proven
-// type was already bottom. This is necessary because we have this yucky convention of using
-// a proven type of bottom for nodes that don't hold JS values, like Phi nodes in ThreadedCPS
-// and storage nodes.
enum FiltrationResult {
- // Means that this operation may not always exit.
FiltrationOK,
-
- // Means taht this operation will always exit.
Contradiction
};
diff --git a/Source/JavaScriptCore/dfg/DFGFinalizer.cpp b/Source/JavaScriptCore/dfg/DFGFinalizer.cpp
index 4e74d7c29..b4025313c 100644
--- a/Source/JavaScriptCore/dfg/DFGFinalizer.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFinalizer.cpp
@@ -29,7 +29,6 @@
#if ENABLE(DFG_JIT)
#include "DFGPlan.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGFinalizer.h b/Source/JavaScriptCore/dfg/DFGFinalizer.h
index 3eb71f199..782c744b1 100644
--- a/Source/JavaScriptCore/dfg/DFGFinalizer.h
+++ b/Source/JavaScriptCore/dfg/DFGFinalizer.h
@@ -26,6 +26,8 @@
#ifndef DFGFinalizer_h
#define DFGFinalizer_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "JITCode.h"
@@ -44,7 +46,6 @@ public:
Finalizer(Plan&);
virtual ~Finalizer();
- virtual size_t codeSize() = 0;
virtual bool finalize() = 0;
virtual bool finalizeFunction() = 0;
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
index 2b7d064b2..185f03591 100644
--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,14 +28,12 @@
#if ENABLE(DFG_JIT)
-#include "ArrayPrototype.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
-#include "TypeLocation.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -63,14 +61,12 @@ public:
m_graph.m_argumentPositions[i].mergeArgumentUnboxingAwareness();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
- fixupGetAndSetLocalsInBlock(m_graph.block(blockIndex));
+ fixupSetLocalsInBlock(m_graph.block(blockIndex));
}
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
- injectTypeConversionsInBlock(m_graph.block(blockIndex));
-
- m_graph.m_planStage = PlanStage::AfterFixup;
-
+ fixupUntypedSetLocalsInBlock(m_graph.block(blockIndex));
+
return true;
}
@@ -94,7 +90,7 @@ private:
switch (op) {
case SetLocal: {
- // This gets handled by fixupGetAndSetLocalsInBlock().
+ // This gets handled by fixupSetLocalsInBlock().
return;
}
@@ -104,50 +100,41 @@ private:
case BitRShift:
case BitLShift:
case BitURShift: {
- fixIntConvertingEdge(node->child1());
- fixIntConvertingEdge(node->child2());
+ fixBinaryIntEdges();
break;
}
case ArithIMul: {
- fixIntConvertingEdge(node->child1());
- fixIntConvertingEdge(node->child2());
+ fixBinaryIntEdges();
node->setOp(ArithMul);
node->setArithMode(Arith::Unchecked);
node->child1().setUseKind(Int32Use);
node->child2().setUseKind(Int32Use);
break;
}
-
- case ArithClz32: {
- fixIntConvertingEdge(node->child1());
- node->setArithMode(Arith::Unchecked);
- break;
- }
case UInt32ToNumber: {
- fixIntConvertingEdge(node->child1());
+ fixIntEdge(node->child1());
if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
node->convertToIdentity();
- else if (node->canSpeculateInt32(FixupPass))
+ else if (nodeCanSpeculateInt32(node->arithNodeFlags()))
node->setArithMode(Arith::CheckOverflow);
- else {
+ else
node->setArithMode(Arith::DoOverflow);
- node->setResult(NodeResultDouble);
- }
break;
}
case ValueAdd: {
if (attemptToMakeIntegerAdd(node)) {
node->setOp(ArithAdd);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
- if (Node::shouldSpeculateNumberOrBooleanExpectingDefined(node->child1().node(), node->child2().node())) {
- fixDoubleOrBooleanEdge(node->child1());
- fixDoubleOrBooleanEdge(node->child2());
+ if (Node::shouldSpeculateNumberExpectingDefined(node->child1().node(), node->child2().node())) {
+ fixEdge<NumberUse>(node->child1());
+ fixEdge<NumberUse>(node->child2());
node->setOp(ArithAdd);
- node->setResult(NodeResultDouble);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
@@ -186,15 +173,14 @@ private:
case ArithSub: {
if (attemptToMakeIntegerAdd(node))
break;
- fixDoubleOrBooleanEdge(node->child1());
- fixDoubleOrBooleanEdge(node->child2());
- node->setResult(NodeResultDouble);
+ fixEdge<NumberUse>(node->child1());
+ fixEdge<NumberUse>(node->child2());
break;
}
case ArithNegate: {
- if (m_graph.negateShouldSpeculateInt32(node, FixupPass)) {
- fixIntOrBooleanEdge(node->child1());
+ if (m_graph.negateShouldSpeculateInt32(node)) {
+ fixEdge<Int32Use>(node->child1());
if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
node->setArithMode(Arith::Unchecked);
else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
@@ -203,24 +189,22 @@ private:
node->setArithMode(Arith::CheckOverflowAndNegativeZero);
break;
}
- if (m_graph.negateShouldSpeculateMachineInt(node, FixupPass)) {
- fixEdge<Int52RepUse>(node->child1());
+ if (m_graph.negateShouldSpeculateMachineInt(node)) {
+ fixEdge<MachineIntUse>(node->child1());
if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
node->setArithMode(Arith::CheckOverflow);
else
node->setArithMode(Arith::CheckOverflowAndNegativeZero);
- node->setResult(NodeResultInt52);
break;
}
- fixDoubleOrBooleanEdge(node->child1());
- node->setResult(NodeResultDouble);
+ fixEdge<NumberUse>(node->child1());
break;
}
case ArithMul: {
- if (m_graph.mulShouldSpeculateInt32(node, FixupPass)) {
- fixIntOrBooleanEdge(node->child1());
- fixIntOrBooleanEdge(node->child2());
+ if (m_graph.mulShouldSpeculateInt32(node)) {
+ fixEdge<Int32Use>(node->child1());
+ fixEdge<Int32Use>(node->child2());
if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
node->setArithMode(Arith::Unchecked);
else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
@@ -229,29 +213,27 @@ private:
node->setArithMode(Arith::CheckOverflowAndNegativeZero);
break;
}
- if (m_graph.mulShouldSpeculateMachineInt(node, FixupPass)) {
- fixEdge<Int52RepUse>(node->child1());
- fixEdge<Int52RepUse>(node->child2());
+ if (m_graph.mulShouldSpeculateMachineInt(node)) {
+ fixEdge<MachineIntUse>(node->child1());
+ fixEdge<MachineIntUse>(node->child2());
if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
node->setArithMode(Arith::CheckOverflow);
else
node->setArithMode(Arith::CheckOverflowAndNegativeZero);
- node->setResult(NodeResultInt52);
break;
}
- fixDoubleOrBooleanEdge(node->child1());
- fixDoubleOrBooleanEdge(node->child2());
- node->setResult(NodeResultDouble);
+ fixEdge<NumberUse>(node->child1());
+ fixEdge<NumberUse>(node->child2());
break;
}
case ArithDiv:
case ArithMod: {
- if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node->child1().node(), node->child2().node())
- && node->canSpeculateInt32(FixupPass)) {
- if (optimizeForX86() || optimizeForARM64() || optimizeForARMv7IDIVSupported()) {
- fixIntOrBooleanEdge(node->child1());
- fixIntOrBooleanEdge(node->child2());
+ if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node())
+ && node->canSpeculateInt32()) {
+ if (optimizeForX86() || optimizeForARM64() || optimizeForARMv7s()) {
+ fixEdge<Int32Use>(node->child1());
+ fixEdge<Int32Use>(node->child2());
if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
node->setArithMode(Arith::Unchecked);
else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
@@ -260,125 +242,84 @@ private:
node->setArithMode(Arith::CheckOverflowAndNegativeZero);
break;
}
+ Edge child1 = node->child1();
+ Edge child2 = node->child2();
- // This will cause conversion nodes to be inserted later.
- fixDoubleOrBooleanEdge(node->child1());
- fixDoubleOrBooleanEdge(node->child2());
-
+ injectInt32ToDoubleNode(node->child1());
+ injectInt32ToDoubleNode(node->child2());
+
// We don't need to do ref'ing on the children because we're stealing them from
// the original division.
Node* newDivision = m_insertionSet.insertNode(
- m_indexInBlock, SpecBytecodeDouble, *node);
- newDivision->setResult(NodeResultDouble);
+ m_indexInBlock, SpecDouble, *node);
node->setOp(DoubleAsInt32);
- node->children.initialize(Edge(newDivision, DoubleRepUse), Edge(), Edge());
+ node->children.initialize(Edge(newDivision, KnownNumberUse), Edge(), Edge());
if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
node->setArithMode(Arith::CheckOverflow);
else
node->setArithMode(Arith::CheckOverflowAndNegativeZero);
+
+ m_insertionSet.insertNode(m_indexInBlock + 1, SpecNone, Phantom, node->codeOrigin, child1, child2);
break;
}
- fixDoubleOrBooleanEdge(node->child1());
- fixDoubleOrBooleanEdge(node->child2());
- node->setResult(NodeResultDouble);
+ fixEdge<NumberUse>(node->child1());
+ fixEdge<NumberUse>(node->child2());
break;
}
case ArithMin:
case ArithMax: {
- if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node->child1().node(), node->child2().node())
- && node->canSpeculateInt32(FixupPass)) {
- fixIntOrBooleanEdge(node->child1());
- fixIntOrBooleanEdge(node->child2());
+ if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node())
+ && node->canSpeculateInt32()) {
+ fixEdge<Int32Use>(node->child1());
+ fixEdge<Int32Use>(node->child2());
break;
}
- fixDoubleOrBooleanEdge(node->child1());
- fixDoubleOrBooleanEdge(node->child2());
- node->setResult(NodeResultDouble);
+ fixEdge<NumberUse>(node->child1());
+ fixEdge<NumberUse>(node->child2());
break;
}
case ArithAbs: {
- if (node->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
- && node->canSpeculateInt32(FixupPass)) {
- fixIntOrBooleanEdge(node->child1());
- break;
- }
- fixDoubleOrBooleanEdge(node->child1());
- node->setResult(NodeResultDouble);
- break;
- }
-
- case ArithPow: {
- node->setResult(NodeResultDouble);
- if (node->child2()->shouldSpeculateInt32OrBooleanForArithmetic()) {
- fixDoubleOrBooleanEdge(node->child1());
- fixIntOrBooleanEdge(node->child2());
- break;
- }
-
- fixDoubleOrBooleanEdge(node->child1());
- fixDoubleOrBooleanEdge(node->child2());
- break;
- }
-
- case ArithRound: {
- if (node->child1()->shouldSpeculateInt32OrBooleanForArithmetic() && node->canSpeculateInt32(FixupPass)) {
- fixIntOrBooleanEdge(node->child1());
- insertCheck<Int32Use>(m_indexInBlock, node->child1().node());
- node->convertToIdentity();
+ if (node->child1()->shouldSpeculateInt32ForArithmetic()
+ && node->canSpeculateInt32()) {
+ fixEdge<Int32Use>(node->child1());
break;
}
- fixDoubleOrBooleanEdge(node->child1());
-
- if (isInt32OrBooleanSpeculation(node->getHeapPrediction()) && m_graph.roundShouldSpeculateInt32(node, FixupPass)) {
- node->setResult(NodeResultInt32);
- if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
- node->setArithRoundingMode(Arith::RoundingMode::Int32);
- else
- node->setArithRoundingMode(Arith::RoundingMode::Int32WithNegativeZeroCheck);
- } else {
- node->setResult(NodeResultDouble);
- node->setArithRoundingMode(Arith::RoundingMode::Double);
- }
+ fixEdge<NumberUse>(node->child1());
break;
}
case ArithSqrt:
- case ArithFRound:
case ArithSin:
- case ArithCos:
- case ArithLog: {
- fixDoubleOrBooleanEdge(node->child1());
- node->setResult(NodeResultDouble);
+ case ArithCos: {
+ fixEdge<NumberUse>(node->child1());
break;
}
case LogicalNot: {
- if (node->child1()->shouldSpeculateBoolean()) {
- if (node->child1()->result() == NodeResultBoolean) {
- // This is necessary in case we have a bytecode instruction implemented by:
- //
- // a: CompareEq(...)
- // b: LogicalNot(@a)
- //
- // In that case, CompareEq might have a side-effect. Then, we need to make
- // sure that we know that Branch does not exit.
- fixEdge<KnownBooleanUse>(node->child1());
- } else
- fixEdge<BooleanUse>(node->child1());
- } else if (node->child1()->shouldSpeculateObjectOrOther())
+ if (node->child1()->shouldSpeculateBoolean())
+ fixEdge<BooleanUse>(node->child1());
+ else if (node->child1()->shouldSpeculateObjectOrOther())
fixEdge<ObjectOrOtherUse>(node->child1());
- else if (node->child1()->shouldSpeculateInt32OrBoolean())
- fixIntOrBooleanEdge(node->child1());
+ else if (node->child1()->shouldSpeculateInt32())
+ fixEdge<Int32Use>(node->child1());
else if (node->child1()->shouldSpeculateNumber())
- fixEdge<DoubleRepUse>(node->child1());
+ fixEdge<NumberUse>(node->child1());
else if (node->child1()->shouldSpeculateString())
fixEdge<StringUse>(node->child1());
break;
}
+ case TypeOf: {
+ if (node->child1()->shouldSpeculateString())
+ fixEdge<StringUse>(node->child1());
+ else if (node->child1()->shouldSpeculateCell())
+ fixEdge<CellUse>(node->child1());
+ break;
+ }
+
case CompareEqConstant: {
break;
}
@@ -388,67 +329,70 @@ private:
case CompareLessEq:
case CompareGreater:
case CompareGreaterEq: {
- if (node->op() == CompareEq
- && Node::shouldSpeculateBoolean(node->child1().node(), node->child2().node())) {
- fixEdge<BooleanUse>(node->child1());
- fixEdge<BooleanUse>(node->child2());
- node->clearFlags(NodeMustGenerate);
- break;
- }
- if (Node::shouldSpeculateInt32OrBoolean(node->child1().node(), node->child2().node())) {
- fixIntOrBooleanEdge(node->child1());
- fixIntOrBooleanEdge(node->child2());
- node->clearFlags(NodeMustGenerate);
+ if (Node::shouldSpeculateInt32(node->child1().node(), node->child2().node())) {
+ fixEdge<Int32Use>(node->child1());
+ fixEdge<Int32Use>(node->child2());
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
if (enableInt52()
&& Node::shouldSpeculateMachineInt(node->child1().node(), node->child2().node())) {
- fixEdge<Int52RepUse>(node->child1());
- fixEdge<Int52RepUse>(node->child2());
- node->clearFlags(NodeMustGenerate);
+ fixEdge<MachineIntUse>(node->child1());
+ fixEdge<MachineIntUse>(node->child2());
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
- if (Node::shouldSpeculateNumberOrBoolean(node->child1().node(), node->child2().node())) {
- fixDoubleOrBooleanEdge(node->child1());
- fixDoubleOrBooleanEdge(node->child2());
- node->clearFlags(NodeMustGenerate);
+ if (Node::shouldSpeculateNumber(node->child1().node(), node->child2().node())) {
+ fixEdge<NumberUse>(node->child1());
+ fixEdge<NumberUse>(node->child2());
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
if (node->op() != CompareEq)
break;
+ if (Node::shouldSpeculateBoolean(node->child1().node(), node->child2().node())) {
+ fixEdge<BooleanUse>(node->child1());
+ fixEdge<BooleanUse>(node->child2());
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
+ break;
+ }
if (node->child1()->shouldSpeculateStringIdent() && node->child2()->shouldSpeculateStringIdent()) {
fixEdge<StringIdentUse>(node->child1());
fixEdge<StringIdentUse>(node->child2());
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && GPRInfo::numberOfRegisters >= 7) {
fixEdge<StringUse>(node->child1());
fixEdge<StringUse>(node->child2());
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObject()) {
fixEdge<ObjectUse>(node->child1());
fixEdge<ObjectUse>(node->child2());
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObjectOrOther()) {
fixEdge<ObjectUse>(node->child1());
fixEdge<ObjectOrOtherUse>(node->child2());
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
if (node->child1()->shouldSpeculateObjectOrOther() && node->child2()->shouldSpeculateObject()) {
fixEdge<ObjectOrOtherUse>(node->child1());
fixEdge<ObjectUse>(node->child2());
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
break;
}
+ case CompareStrictEqConstant: {
+ break;
+ }
+
case CompareStrictEq: {
if (Node::shouldSpeculateBoolean(node->child1().node(), node->child2().node())) {
fixEdge<BooleanUse>(node->child1());
@@ -462,13 +406,13 @@ private:
}
if (enableInt52()
&& Node::shouldSpeculateMachineInt(node->child1().node(), node->child2().node())) {
- fixEdge<Int52RepUse>(node->child1());
- fixEdge<Int52RepUse>(node->child2());
+ fixEdge<MachineIntUse>(node->child1());
+ fixEdge<MachineIntUse>(node->child2());
break;
}
if (Node::shouldSpeculateNumber(node->child1().node(), node->child2().node())) {
- fixEdge<DoubleRepUse>(node->child1());
- fixEdge<DoubleRepUse>(node->child2());
+ fixEdge<NumberUse>(node->child1());
+ fixEdge<NumberUse>(node->child2());
break;
}
if (node->child1()->shouldSpeculateStringIdent() && node->child2()->shouldSpeculateStringIdent()) {
@@ -476,58 +420,16 @@ private:
fixEdge<StringIdentUse>(node->child2());
break;
}
- if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 7) || isFTL(m_graph.m_plan.mode))) {
+ if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && GPRInfo::numberOfRegisters >= 7) {
fixEdge<StringUse>(node->child1());
fixEdge<StringUse>(node->child2());
break;
}
- WatchpointSet* masqueradesAsUndefinedWatchpoint = m_graph.globalObjectFor(node->origin.semantic)->masqueradesAsUndefinedWatchpoint();
- if (masqueradesAsUndefinedWatchpoint->isStillValid()) {
-
- if (node->child1()->shouldSpeculateObject()) {
- m_graph.watchpoints().addLazily(masqueradesAsUndefinedWatchpoint);
- fixEdge<ObjectUse>(node->child1());
- break;
- }
- if (node->child2()->shouldSpeculateObject()) {
- m_graph.watchpoints().addLazily(masqueradesAsUndefinedWatchpoint);
- fixEdge<ObjectUse>(node->child2());
- break;
- }
-
- } else if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObject()) {
+ if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObject()) {
fixEdge<ObjectUse>(node->child1());
fixEdge<ObjectUse>(node->child2());
break;
}
- if (node->child1()->shouldSpeculateMisc()) {
- fixEdge<MiscUse>(node->child1());
- break;
- }
- if (node->child2()->shouldSpeculateMisc()) {
- fixEdge<MiscUse>(node->child2());
- break;
- }
- if (node->child1()->shouldSpeculateStringIdent()
- && node->child2()->shouldSpeculateNotStringVar()) {
- fixEdge<StringIdentUse>(node->child1());
- fixEdge<NotStringVarUse>(node->child2());
- break;
- }
- if (node->child2()->shouldSpeculateStringIdent()
- && node->child1()->shouldSpeculateNotStringVar()) {
- fixEdge<StringIdentUse>(node->child2());
- fixEdge<NotStringVarUse>(node->child1());
- break;
- }
- if (node->child1()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || isFTL(m_graph.m_plan.mode))) {
- fixEdge<StringUse>(node->child1());
- break;
- }
- if (node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || isFTL(m_graph.m_plan.mode))) {
- fixEdge<StringUse>(node->child2());
- break;
- }
break;
}
@@ -546,84 +448,27 @@ private:
}
case GetByVal: {
- if (!node->prediction()) {
- m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, ForceOSRExit, node->origin);
- }
-
node->setArrayMode(
node->arrayMode().refine(
- m_graph, node,
node->child1()->prediction(),
node->child2()->prediction(),
- SpecNone));
+ SpecNone, node->flags()));
blessArrayOperation(node->child1(), node->child2(), node->child3());
ArrayMode arrayMode = node->arrayMode();
switch (arrayMode.type()) {
- case Array::Contiguous:
case Array::Double:
if (arrayMode.arrayClass() == Array::OriginalArray
- && arrayMode.speculation() == Array::InBounds) {
- JSGlobalObject* globalObject = m_graph.globalObjectFor(node->origin.semantic);
- if (globalObject->arrayPrototypeChainIsSane()) {
- // Check if SaneChain will work on a per-type basis. Note that:
- //
- // 1) We don't want double arrays to sometimes return undefined, since
- // that would require a change to the return type and it would pessimise
- // things a lot. So, we'd only want to do that if we actually had
- // evidence that we could read from a hole. That's pretty annoying.
- // Likely the best way to handle that case is with an equivalent of
- // SaneChain for OutOfBounds. For now we just detect when Undefined and
- // NaN are indistinguishable according to backwards propagation, and just
- // use SaneChain in that case. This happens to catch a lot of cases.
- //
- // 2) We don't want int32 array loads to have to do a hole check just to
- // coerce to Undefined, since that would mean twice the checks.
- //
- // This has two implications. First, we have to do more checks than we'd
- // like. It's unfortunate that we have to do the hole check. Second,
- // some accesses that hit a hole will now need to take the full-blown
- // out-of-bounds slow path. We can fix that with:
- // https://bugs.webkit.org/show_bug.cgi?id=144668
-
- bool canDoSaneChain = false;
- switch (arrayMode.type()) {
- case Array::Contiguous:
- // This is happens to be entirely natural. We already would have
- // returned any JSValue, and now we'll return Undefined. We still do
- // the check but it doesn't require taking any kind of slow path.
- canDoSaneChain = true;
- break;
-
- case Array::Double:
- if (!(node->flags() & NodeBytecodeUsesAsOther)) {
- // Holes look like NaN already, so if the user doesn't care
- // about the difference between Undefined and NaN then we can
- // do this.
- canDoSaneChain = true;
- }
- break;
-
- default:
- break;
- }
-
- if (canDoSaneChain) {
- m_graph.watchpoints().addLazily(
- globalObject->arrayPrototype()->structure()->transitionWatchpointSet());
- m_graph.watchpoints().addLazily(
- globalObject->objectPrototype()->structure()->transitionWatchpointSet());
- node->setArrayMode(arrayMode.withSpeculation(Array::SaneChain));
- }
- }
- }
+ && arrayMode.speculation() == Array::InBounds
+ && m_graph.globalObjectFor(node->codeOrigin)->arrayPrototypeChainIsSane()
+ && !(node->flags() & NodeBytecodeUsesAsOther))
+ node->setArrayMode(arrayMode.withSpeculation(Array::SaneChain));
break;
case Array::String:
if ((node->prediction() & ~SpecString)
- || m_graph.hasExitSite(node->origin.semantic, OutOfBounds))
+ || m_graph.hasExitSite(node->codeOrigin, OutOfBounds))
node->setArrayMode(arrayMode.withSpeculation(Array::OutOfBounds));
break;
@@ -631,10 +476,10 @@ private:
break;
}
- arrayMode = node->arrayMode();
- switch (arrayMode.type()) {
+ switch (node->arrayMode().type()) {
case Array::SelectUsingPredictions:
case Array::Unprofiled:
+ case Array::Undecided:
RELEASE_ASSERT_NOT_REACHED();
break;
case Array::Generic:
@@ -650,30 +495,6 @@ private:
break;
}
- switch (arrayMode.type()) {
- case Array::Double:
- if (!arrayMode.isOutOfBounds())
- node->setResult(NodeResultDouble);
- break;
-
- case Array::Float32Array:
- case Array::Float64Array:
- node->setResult(NodeResultDouble);
- break;
-
- case Array::Uint32Array:
- if (node->shouldSpeculateInt32())
- break;
- if (node->shouldSpeculateMachineInt() && enableInt52())
- node->setResult(NodeResultInt52);
- else
- node->setResult(NodeResultDouble);
- break;
-
- default:
- break;
- }
-
break;
}
@@ -686,7 +507,6 @@ private:
node->setArrayMode(
node->arrayMode().refine(
- m_graph, node,
child1->prediction(),
child2->prediction(),
child3->prediction()));
@@ -695,7 +515,6 @@ private:
switch (node->arrayMode().modeForPut().type()) {
case Array::SelectUsingPredictions:
- case Array::SelectUsingArguments:
case Array::Unprofiled:
case Array::Undecided:
RELEASE_ASSERT_NOT_REACHED();
@@ -713,11 +532,15 @@ private:
fixEdge<KnownCellUse>(child1);
fixEdge<Int32Use>(child2);
fixEdge<Int32Use>(child3);
+ if (child3->prediction() & SpecInt52)
+ fixEdge<MachineIntUse>(child3);
+ else
+ fixEdge<Int32Use>(child3);
break;
case Array::Double:
fixEdge<KnownCellUse>(child1);
fixEdge<Int32Use>(child2);
- fixEdge<DoubleRepRealUse>(child3);
+ fixEdge<RealNumberUse>(child3);
break;
case Array::Int8Array:
case Array::Int16Array:
@@ -729,24 +552,25 @@ private:
fixEdge<KnownCellUse>(child1);
fixEdge<Int32Use>(child2);
if (child3->shouldSpeculateInt32())
- fixIntOrBooleanEdge(child3);
+ fixEdge<Int32Use>(child3);
else if (child3->shouldSpeculateMachineInt())
- fixEdge<Int52RepUse>(child3);
+ fixEdge<MachineIntUse>(child3);
else
- fixDoubleOrBooleanEdge(child3);
+ fixEdge<NumberUse>(child3);
break;
case Array::Float32Array:
case Array::Float64Array:
fixEdge<KnownCellUse>(child1);
fixEdge<Int32Use>(child2);
- fixDoubleOrBooleanEdge(child3);
+ fixEdge<NumberUse>(child3);
break;
case Array::Contiguous:
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
+ case Array::Arguments:
fixEdge<KnownCellUse>(child1);
fixEdge<Int32Use>(child2);
- speculateForBarrier(child3);
+ insertStoreBarrier(m_indexInBlock, child1, child3);
break;
default:
fixEdge<KnownCellUse>(child1);
@@ -768,7 +592,6 @@ private:
// that would break things.
node->setArrayMode(
node->arrayMode().refine(
- m_graph, node,
node->child1()->prediction() & SpecCell,
SpecInt32,
node->child2()->prediction()));
@@ -780,11 +603,11 @@ private:
fixEdge<Int32Use>(node->child2());
break;
case Array::Double:
- fixEdge<DoubleRepRealUse>(node->child2());
+ fixEdge<RealNumberUse>(node->child2());
break;
case Array::Contiguous:
case Array::ArrayStorage:
- speculateForBarrier(node->child2());
+ insertStoreBarrier(m_indexInBlock, node->child1(), node->child2());
break;
default:
break;
@@ -806,26 +629,48 @@ private:
}
case Branch: {
- if (node->child1()->shouldSpeculateBoolean()) {
- if (node->child1()->result() == NodeResultBoolean) {
- // This is necessary in case we have a bytecode instruction implemented by:
- //
- // a: CompareEq(...)
- // b: Branch(@a)
- //
- // In that case, CompareEq might have a side-effect. Then, we need to make
- // sure that we know that Branch does not exit.
- fixEdge<KnownBooleanUse>(node->child1());
- } else
- fixEdge<BooleanUse>(node->child1());
- } else if (node->child1()->shouldSpeculateObjectOrOther())
+ if (node->child1()->shouldSpeculateBoolean())
+ fixEdge<BooleanUse>(node->child1());
+ else if (node->child1()->shouldSpeculateObjectOrOther())
fixEdge<ObjectOrOtherUse>(node->child1());
- else if (node->child1()->shouldSpeculateInt32OrBoolean())
- fixIntOrBooleanEdge(node->child1());
+ else if (node->child1()->shouldSpeculateInt32())
+ fixEdge<Int32Use>(node->child1());
else if (node->child1()->shouldSpeculateNumber())
- fixEdge<DoubleRepUse>(node->child1());
- else if (node->child1()->shouldSpeculateString())
- fixEdge<StringUse>(node->child1());
+ fixEdge<NumberUse>(node->child1());
+
+ Node* logicalNot = node->child1().node();
+ if (logicalNot->op() == LogicalNot) {
+
+ // Make sure that OSR exit can't observe the LogicalNot. If it can,
+ // then we must compute it and cannot peephole around it.
+ bool found = false;
+ bool ok = true;
+ for (unsigned i = m_indexInBlock; i--;) {
+ Node* candidate = m_block->at(i);
+ if (candidate == logicalNot) {
+ found = true;
+ break;
+ }
+ if (candidate->canExit()) {
+ ok = false;
+ found = true;
+ break;
+ }
+ }
+ ASSERT_UNUSED(found, found);
+
+ if (ok) {
+ Edge newChildEdge = logicalNot->child1();
+ if (newChildEdge->hasBooleanResult()) {
+ node->children.setChild1(newChildEdge);
+
+ BasicBlock* toBeTaken = node->notTakenBlock();
+ BasicBlock* toBeNotTaken = node->takenBlock();
+ node->setTakenBlock(toBeTaken);
+ node->setNotTakenBlock(toBeNotTaken);
+ }
+ }
+ }
break;
}
@@ -846,12 +691,6 @@ private:
else if (node->child1()->shouldSpeculateString())
fixEdge<StringUse>(node->child1());
break;
- case SwitchCell:
- if (node->child1()->shouldSpeculateCell())
- fixEdge<CellUse>(node->child1());
- // else it's fine for this to have UntypedUse; we will handle this by just making
- // non-cells take the default case.
- break;
}
break;
}
@@ -861,9 +700,8 @@ private:
break;
}
- case ToString:
- case CallStringConstructor: {
- fixupToStringOrCallStringConstructor(node);
+ case ToString: {
+ fixupToString(node);
break;
}
@@ -888,7 +726,7 @@ private:
// would have already exited by now, but insert a forced exit just to
// be safe.
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, ForceOSRExit, node->origin);
+ m_indexInBlock, SpecNone, ForceOSRExit, node->codeOrigin);
}
break;
case ALL_INT32_INDEXING_TYPES:
@@ -897,7 +735,7 @@ private:
break;
case ALL_DOUBLE_INDEXING_TYPES:
for (unsigned operandIndex = 0; operandIndex < node->numChildren(); ++operandIndex)
- fixEdge<DoubleRepRealUse>(m_graph.m_varArgChildren[node->firstChild() + operandIndex]);
+ fixEdge<RealNumberUse>(m_graph.m_varArgChildren[node->firstChild() + operandIndex]);
break;
case ALL_CONTIGUOUS_INDEXING_TYPES:
case ALL_ARRAY_STORAGE_INDEXING_TYPES:
@@ -912,7 +750,7 @@ private:
case NewTypedArray: {
if (node->child1()->shouldSpeculateInt32()) {
fixEdge<Int32Use>(node->child1());
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
break;
}
break;
@@ -924,9 +762,9 @@ private:
}
case ToThis: {
- ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->isStrictMode() ? StrictMode : NotStrictMode;
+ ECMAMode ecmaMode = m_graph.executableFor(node->codeOrigin)->isStrictMode() ? StrictMode : NotStrictMode;
- if (node->child1()->shouldSpeculateOther()) {
+ if (isOtherSpeculation(node->child1()->prediction())) {
if (ecmaMode == StrictMode) {
fixEdge<OtherUse>(node->child1());
node->convertToIdentity();
@@ -934,11 +772,10 @@ private:
}
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, Check, node->origin,
+ m_indexInBlock, SpecNone, Phantom, node->codeOrigin,
Edge(node->child1().node(), OtherUse));
observeUseKindOnNode<OtherUse>(node->child1().node());
- m_graph.convertToConstant(
- node, m_graph.globalThisObjectFor(node->origin.semantic));
+ node->convertToWeakConstant(m_graph.globalThisObjectFor(node->codeOrigin));
break;
}
@@ -951,28 +788,28 @@ private:
break;
}
- case PutStructure: {
- fixEdge<KnownCellUse>(node->child1());
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValSafe: {
+ fixEdge<Int32Use>(node->child1());
break;
}
- case GetClosureVar:
- case GetFromArguments: {
+ case PutStructure: {
fixEdge<KnownCellUse>(node->child1());
+ insertStoreBarrier(m_indexInBlock, node->child1());
break;
}
- case PutClosureVar:
- case PutToArguments: {
+ case PutClosureVar: {
fixEdge<KnownCellUse>(node->child1());
- speculateForBarrier(node->child2());
+ insertStoreBarrier(m_indexInBlock, node->child1(), node->child3());
break;
}
-
+
+ case GetClosureRegisters:
+ case SkipTopScope:
case SkipScope:
- case GetScope:
- case GetGetter:
- case GetSetter: {
+ case GetScope: {
fixEdge<KnownCellUse>(node->child1());
break;
}
@@ -980,6 +817,7 @@ private:
case AllocatePropertyStorage:
case ReallocatePropertyStorage: {
fixEdge<KnownCellUse>(node->child1());
+ insertStoreBarrier(m_indexInBlock + 1, node->child1());
break;
}
@@ -987,60 +825,40 @@ private:
case GetByIdFlush: {
if (!node->child1()->shouldSpeculateCell())
break;
-
- // If we hadn't exited because of BadCache, BadIndexingType, or ExoticObjectMode, then
- // leave this as a GetById.
- if (!m_graph.hasExitSite(node->origin.semantic, BadCache)
- && !m_graph.hasExitSite(node->origin.semantic, BadIndexingType)
- && !m_graph.hasExitSite(node->origin.semantic, ExoticObjectMode)) {
- auto uid = m_graph.identifiers()[node->identifierNumber()];
- if (uid == vm().propertyNames->length.impl()) {
- attemptToMakeGetArrayLength(node);
- break;
- }
- if (uid == vm().propertyNames->byteLength.impl()) {
- attemptToMakeGetTypedArrayByteLength(node);
- break;
- }
- if (uid == vm().propertyNames->byteOffset.impl()) {
- attemptToMakeGetTypedArrayByteOffset(node);
- break;
- }
+ StringImpl* impl = m_graph.identifiers()[node->identifierNumber()];
+ if (impl == vm().propertyNames->length.impl()) {
+ attemptToMakeGetArrayLength(node);
+ break;
+ }
+ if (impl == vm().propertyNames->byteLength.impl()) {
+ attemptToMakeGetTypedArrayByteLength(node);
+ break;
+ }
+ if (impl == vm().propertyNames->byteOffset.impl()) {
+ attemptToMakeGetTypedArrayByteOffset(node);
+ break;
}
fixEdge<CellUse>(node->child1());
break;
}
case PutById:
- case PutByIdFlush:
case PutByIdDirect: {
fixEdge<CellUse>(node->child1());
- speculateForBarrier(node->child2());
+ insertStoreBarrier(m_indexInBlock, node->child1(), node->child2());
break;
}
- case GetExecutable: {
- fixEdge<FunctionUse>(node->child1());
- break;
- }
-
+ case CheckExecutable:
case CheckStructure:
- case CheckCell:
+ case StructureTransitionWatchpoint:
+ case CheckFunction:
case CheckHasInstance:
case CreateThis:
case GetButterfly: {
fixEdge<CellUse>(node->child1());
break;
}
-
- case CheckIdent: {
- UniquedStringImpl* uid = node->uidOperand();
- if (uid->isSymbol())
- fixEdge<SymbolUse>(node->child1());
- else
- fixEdge<StringIdentUse>(node->child1());
- break;
- }
case Arrayify:
case ArrayifyToStructure: {
@@ -1050,34 +868,24 @@ private:
break;
}
- case GetByOffset:
- case GetGetterSetterByOffset: {
+ case GetByOffset: {
if (!node->child1()->hasStorageResult())
fixEdge<KnownCellUse>(node->child1());
fixEdge<KnownCellUse>(node->child2());
break;
}
- case MultiGetByOffset: {
- fixEdge<CellUse>(node->child1());
- break;
- }
-
case PutByOffset: {
if (!node->child1()->hasStorageResult())
fixEdge<KnownCellUse>(node->child1());
fixEdge<KnownCellUse>(node->child2());
- speculateForBarrier(node->child3());
- break;
- }
-
- case MultiPutByOffset: {
- fixEdge<CellUse>(node->child1());
- speculateForBarrier(node->child2());
+ insertStoreBarrier(m_indexInBlock, node->child2(), node->child3());
break;
}
case InstanceOf: {
+ // FIXME: This appears broken: CheckHasInstance already does an unconditional cell
+ // check. https://bugs.webkit.org/show_bug.cgi?id=107479
if (!(node->child1()->prediction() & ~SpecCell))
fixEdge<CellUse>(node->child1());
fixEdge<CellUse>(node->child2());
@@ -1092,228 +900,93 @@ private:
break;
}
- case Check: {
- m_graph.doToChildren(
- node,
- [&] (Edge& edge) {
- switch (edge.useKind()) {
- case NumberUse:
- if (edge->shouldSpeculateInt32ForArithmetic())
- edge.setUseKind(Int32Use);
- break;
- default:
- break;
- }
- observeUseKindOnEdge(edge);
- });
- break;
- }
-
case Phantom:
- // Phantoms are meaningless past Fixup. We recreate them on-demand in the backend.
- node->remove();
- break;
-
- case FiatInt52: {
- RELEASE_ASSERT(enableInt52());
- node->convertToIdentity();
- fixEdge<Int52RepUse>(node->child1());
- node->setResult(NodeResultInt52);
+ case Identity:
+ case Check: {
+ switch (node->child1().useKind()) {
+ case NumberUse:
+ if (node->child1()->shouldSpeculateInt32ForArithmetic())
+ node->child1().setUseKind(Int32Use);
+ break;
+ default:
+ break;
+ }
+ observeUseKindOnEdge(node->child1());
break;
}
case GetArrayLength:
case Phi:
case Upsilon:
+ case GetArgument:
+ case PhantomPutStructure:
case GetIndexedPropertyStorage:
case GetTypedArrayByteOffset:
case LastNodeType:
case CheckTierUpInLoop:
case CheckTierUpAtReturn:
case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter:
+ case Int52ToDouble:
+ case Int52ToValue:
case InvalidationPoint:
case CheckArray:
case CheckInBounds:
case ConstantStoragePointer:
case DoubleAsInt32:
+ case Int32ToDouble:
case ValueToInt32:
- case DoubleRep:
- case ValueRep:
- case Int52Rep:
- case Int52Constant:
- case Identity: // This should have been cleaned up.
- case BooleanToNumber:
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- case ForwardVarargs:
- case GetMyArgumentByVal:
- case PutHint:
- case CheckStructureImmediate:
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- case PutStack:
- case KillStack:
- case GetStack:
- case StoreBarrier:
// These are just nodes that we don't currently expect to see during fixup.
// If we ever wanted to insert them prior to fixup, then we just have to create
// fixup rules for them.
- DFG_CRASH(m_graph, node, "Unexpected node during fixup");
+ RELEASE_ASSERT_NOT_REACHED();
break;
case PutGlobalVar: {
- fixEdge<CellUse>(node->child1());
- speculateForBarrier(node->child2());
+ Node* globalObjectNode = m_insertionSet.insertNode(m_indexInBlock, SpecNone, WeakJSConstant, node->codeOrigin,
+ OpInfo(m_graph.globalObjectFor(node->codeOrigin)));
+ Node* barrierNode = m_graph.addNode(SpecNone, ConditionalStoreBarrier, m_currentNode->codeOrigin,
+ Edge(globalObjectNode, KnownCellUse), Edge(node->child1().node(), UntypedUse));
+ fixupNode(barrierNode);
+ m_insertionSet.insert(m_indexInBlock, barrierNode);
+ break;
+ }
+
+ case TearOffActivation: {
+ Node* barrierNode = m_graph.addNode(SpecNone, StoreBarrierWithNullCheck, m_currentNode->codeOrigin,
+ Edge(node->child1().node(), UntypedUse));
+ fixupNode(barrierNode);
+ m_insertionSet.insert(m_indexInBlock, barrierNode);
break;
}
case IsString:
if (node->child1()->shouldSpeculateString()) {
- m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, Check, node->origin,
+ m_insertionSet.insertNode(m_indexInBlock, SpecNone, Phantom, node->codeOrigin,
Edge(node->child1().node(), StringUse));
m_graph.convertToConstant(node, jsBoolean(true));
observeUseKindOnNode<StringUse>(node);
}
break;
-
- case IsObject:
- if (node->child1()->shouldSpeculateObject()) {
- m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, Check, node->origin,
- Edge(node->child1().node(), ObjectUse));
- m_graph.convertToConstant(node, jsBoolean(true));
- observeUseKindOnNode<ObjectUse>(node);
- }
- break;
-
- case GetEnumerableLength: {
- fixEdge<CellUse>(node->child1());
- break;
- }
- case HasGenericProperty: {
- fixEdge<CellUse>(node->child2());
- break;
- }
- case HasStructureProperty: {
- fixEdge<StringUse>(node->child2());
- fixEdge<KnownCellUse>(node->child3());
- break;
- }
- case HasIndexedProperty: {
- node->setArrayMode(
- node->arrayMode().refine(
- m_graph, node,
- node->child1()->prediction(),
- node->child2()->prediction(),
- SpecNone));
-
- blessArrayOperation(node->child1(), node->child2(), node->child3());
- fixEdge<CellUse>(node->child1());
- fixEdge<KnownInt32Use>(node->child2());
- break;
- }
- case GetDirectPname: {
- Edge& base = m_graph.varArgChild(node, 0);
- Edge& property = m_graph.varArgChild(node, 1);
- Edge& index = m_graph.varArgChild(node, 2);
- Edge& enumerator = m_graph.varArgChild(node, 3);
- fixEdge<CellUse>(base);
- fixEdge<KnownCellUse>(property);
- fixEdge<KnownInt32Use>(index);
- fixEdge<KnownCellUse>(enumerator);
- break;
- }
- case GetPropertyEnumerator: {
- fixEdge<CellUse>(node->child1());
- break;
- }
- case GetEnumeratorStructurePname: {
- fixEdge<KnownCellUse>(node->child1());
- fixEdge<KnownInt32Use>(node->child2());
- break;
- }
- case GetEnumeratorGenericPname: {
- fixEdge<KnownCellUse>(node->child1());
- fixEdge<KnownInt32Use>(node->child2());
- break;
- }
- case ToIndexString: {
- fixEdge<KnownInt32Use>(node->child1());
- break;
- }
- case ProfileType: {
- // We want to insert type checks based on the instructionTypeSet of the TypeLocation, not the globalTypeSet.
- // Because the instructionTypeSet is contained in globalTypeSet, if we produce a type check for
- // type T for the instructionTypeSet, the global type set must also have information for type T.
- // So if it the type check succeeds for type T in the instructionTypeSet, a type check for type T
- // in the globalTypeSet would've also succeeded.
- // (The other direction does not hold in general).
-
- RefPtr<TypeSet> typeSet = node->typeLocation()->m_instructionTypeSet;
- RuntimeTypeMask seenTypes = typeSet->seenTypes();
- if (typeSet->doesTypeConformTo(TypeMachineInt)) {
- if (node->child1()->shouldSpeculateInt32())
- fixEdge<Int32Use>(node->child1());
- else
- fixEdge<MachineIntUse>(node->child1());
- node->remove();
- } else if (typeSet->doesTypeConformTo(TypeNumber | TypeMachineInt)) {
- fixEdge<NumberUse>(node->child1());
- node->remove();
- } else if (typeSet->doesTypeConformTo(TypeString)) {
- fixEdge<StringUse>(node->child1());
- node->remove();
- } else if (typeSet->doesTypeConformTo(TypeBoolean)) {
- fixEdge<BooleanUse>(node->child1());
- node->remove();
- } else if (typeSet->doesTypeConformTo(TypeUndefined | TypeNull) && (seenTypes & TypeUndefined) && (seenTypes & TypeNull)) {
- fixEdge<OtherUse>(node->child1());
- node->remove();
- } else if (typeSet->doesTypeConformTo(TypeObject)) {
- StructureSet set = typeSet->structureSet();
- if (!set.isEmpty()) {
- fixEdge<CellUse>(node->child1());
- node->convertToCheckStructure(m_graph.addStructureSet(set));
- }
- }
-
- break;
- }
-
- case CreateScopedArguments:
- case CreateActivation:
- case NewFunction: {
- fixEdge<CellUse>(node->child1());
- break;
- }
#if !ASSERT_DISABLED
// Have these no-op cases here to ensure that nobody forgets to add handlers for new opcodes.
case SetArgument:
case JSConstant:
- case DoubleConstant:
+ case WeakJSConstant:
case GetLocal:
case GetCallee:
- case GetArgumentCount:
case Flush:
case PhantomLocal:
case GetLocalUnlinked:
+ case GetMyScope:
+ case GetClosureVar:
case GetGlobalVar:
case NotifyWrite:
+ case VariableWatchpoint:
case VarInjectionWatchpoint:
+ case AllocationProfileWatchpoint:
case Call:
case Construct:
- case CallVarargs:
- case ConstructVarargs:
- case CallForwardVarargs:
- case ConstructForwardVarargs:
- case LoadVarargs:
- case ProfileControlFlow:
case NewObject:
case NewArrayBuffer:
case NewRegexp:
@@ -1323,39 +996,58 @@ private:
case IsUndefined:
case IsBoolean:
case IsNumber:
- case IsObjectOrNull:
+ case IsObject:
case IsFunction:
- case CreateDirectArguments:
- case CreateClonedArguments:
+ case CreateActivation:
+ case CreateArguments:
+ case PhantomArguments:
+ case TearOffArguments:
+ case GetMyArgumentsLength:
+ case GetMyArgumentsLengthSafe:
+ case CheckArgumentsNotCreated:
+ case NewFunction:
+ case NewFunctionNoCheck:
+ case NewFunctionExpression:
case Jump:
case Return:
case Throw:
case ThrowReferenceError:
case CountExecution:
case ForceOSRExit:
- case CheckBadCell:
- case CheckNotEmpty:
case CheckWatchdogTimer:
case Unreachable:
case ExtractOSREntryLocal:
case LoopHint:
+ case StoreBarrier:
+ case ConditionalStoreBarrier:
+ case StoreBarrierWithNullCheck:
+ case FunctionReentryWatchpoint:
+ case TypedArrayWatchpoint:
case MovHint:
case ZombieHint:
- case BottomValue:
- case TypeOf:
break;
#else
default:
break;
#endif
}
+
+ if (!node->containsMovHint())
+ DFG_NODE_DO_TO_CHILDREN(m_graph, node, observeUntypedEdge);
+ }
+
+ void observeUntypedEdge(Node*, Edge& edge)
+ {
+ if (edge.useKind() != UntypedUse)
+ return;
+ fixEdge<UntypedUse>(edge);
}
template<UseKind useKind>
void createToString(Node* node, Edge& edge)
{
edge.setNode(m_insertionSet.insertNode(
- m_indexInBlock, SpecString, ToString, node->origin,
+ m_indexInBlock, SpecString, ToString, node->codeOrigin,
Edge(edge.node(), useKind)));
}
@@ -1364,7 +1056,7 @@ private:
{
ASSERT(arrayMode == ArrayMode(Array::Generic));
- if (!canOptimizeStringObjectAccess(node->origin.semantic))
+ if (!canOptimizeStringObjectAccess(node->codeOrigin))
return;
createToString<useKind>(node, node->child1());
@@ -1392,7 +1084,7 @@ private:
// decision process much easier.
observeUseKindOnNode<StringUse>(edge.node());
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, Check, node->origin,
+ m_indexInBlock, SpecNone, Phantom, node->codeOrigin,
Edge(edge.node(), StringUse));
edge.setUseKind(KnownStringUse);
return;
@@ -1417,9 +1109,9 @@ private:
if (!edge)
break;
edge.setUseKind(KnownStringUse);
- JSString* string = edge->dynamicCastConstant<JSString*>();
- if (!string)
+ if (!m_graph.isConstant(edge.node()))
continue;
+ JSString* string = jsCast<JSString*>(m_graph.valueOfJSConstant(edge.node()).asCell());
if (string->length())
continue;
@@ -1451,21 +1143,21 @@ private:
}
if (node->child1()->shouldSpeculateStringObject()
- && canOptimizeStringObjectAccess(node->origin.semantic)) {
+ && canOptimizeStringObjectAccess(node->codeOrigin)) {
fixEdge<StringObjectUse>(node->child1());
node->convertToToString();
return;
}
if (node->child1()->shouldSpeculateStringOrStringObject()
- && canOptimizeStringObjectAccess(node->origin.semantic)) {
+ && canOptimizeStringObjectAccess(node->codeOrigin)) {
fixEdge<StringOrStringObjectUse>(node->child1());
node->convertToToString();
return;
}
}
- void fixupToStringOrCallStringConstructor(Node* node)
+ void fixupToString(Node* node)
{
if (node->child1()->shouldSpeculateString()) {
fixEdge<StringUse>(node->child1());
@@ -1474,13 +1166,13 @@ private:
}
if (node->child1()->shouldSpeculateStringObject()
- && canOptimizeStringObjectAccess(node->origin.semantic)) {
+ && canOptimizeStringObjectAccess(node->codeOrigin)) {
fixEdge<StringObjectUse>(node->child1());
return;
}
if (node->child1()->shouldSpeculateStringOrStringObject()
- && canOptimizeStringObjectAccess(node->origin.semantic)) {
+ && canOptimizeStringObjectAccess(node->codeOrigin)) {
fixEdge<StringOrStringObjectUse>(node->child1());
return;
}
@@ -1494,18 +1186,21 @@ private:
template<UseKind leftUseKind>
bool attemptToMakeFastStringAdd(Node* node, Edge& left, Edge& right)
{
+ Node* originalLeft = left.node();
+ Node* originalRight = right.node();
+
ASSERT(leftUseKind == StringUse || leftUseKind == StringObjectUse || leftUseKind == StringOrStringObjectUse);
- if (isStringObjectUse<leftUseKind>() && !canOptimizeStringObjectAccess(node->origin.semantic))
+ if (isStringObjectUse<leftUseKind>() && !canOptimizeStringObjectAccess(node->codeOrigin))
return false;
convertStringAddUse<leftUseKind>(node, left);
if (right->shouldSpeculateString())
convertStringAddUse<StringUse>(node, right);
- else if (right->shouldSpeculateStringObject() && canOptimizeStringObjectAccess(node->origin.semantic))
+ else if (right->shouldSpeculateStringObject() && canOptimizeStringObjectAccess(node->codeOrigin))
convertStringAddUse<StringObjectUse>(node, right);
- else if (right->shouldSpeculateStringOrStringObject() && canOptimizeStringObjectAccess(node->origin.semantic))
+ else if (right->shouldSpeculateStringOrStringObject() && canOptimizeStringObjectAccess(node->codeOrigin))
convertStringAddUse<StringOrStringObjectUse>(node, right);
else {
// At this point we know that the other operand is something weird. The semantically correct
@@ -1517,43 +1212,43 @@ private:
// anything to @right, since ToPrimitive may be effectful.
Node* toPrimitive = m_insertionSet.insertNode(
- m_indexInBlock, resultOfToPrimitive(right->prediction()), ToPrimitive,
- node->origin, Edge(right.node()));
+ m_indexInBlock, resultOfToPrimitive(right->prediction()), ToPrimitive, node->codeOrigin,
+ Edge(right.node()));
Node* toString = m_insertionSet.insertNode(
- m_indexInBlock, SpecString, ToString, node->origin, Edge(toPrimitive));
+ m_indexInBlock, SpecString, ToString, node->codeOrigin, Edge(toPrimitive));
fixupToPrimitive(toPrimitive);
-
- // Don't fix up ToString. ToString and ToPrimitive are originated from the same bytecode and
- // ToPrimitive may have an observable side effect. ToString should not be converted into Check
- // with speculative type check because OSR exit reproduce an observable side effect done in
- // ToPrimitive.
-
+ fixupToString(toString);
+
right.setNode(toString);
}
+ // We're doing checks up there, so we need to make sure that the
+ // *original* inputs to the addition are live up to here.
+ m_insertionSet.insertNode(
+ m_indexInBlock, SpecNone, Phantom, node->codeOrigin,
+ Edge(originalLeft), Edge(originalRight));
+
convertToMakeRope(node);
return true;
}
- bool isStringPrototypeMethodSane(
- JSObject* stringPrototype, Structure* stringPrototypeStructure, UniquedStringImpl* uid)
+ bool isStringPrototypeMethodSane(Structure* stringPrototypeStructure, StringImpl* uid)
{
unsigned attributesUnused;
- PropertyOffset offset =
- stringPrototypeStructure->getConcurrently(uid, attributesUnused);
+ JSCell* specificValue;
+ PropertyOffset offset = stringPrototypeStructure->getConcurrently(
+ vm(), uid, attributesUnused, specificValue);
if (!isValidOffset(offset))
return false;
- JSValue value = m_graph.tryGetConstantProperty(
- stringPrototype, stringPrototypeStructure, offset);
- if (!value)
+ if (!specificValue)
return false;
- JSFunction* function = jsDynamicCast<JSFunction*>(value);
- if (!function)
+ if (!specificValue->inherits(JSFunction::info()))
return false;
+ JSFunction* function = jsCast<JSFunction*>(specificValue);
if (function->executable()->intrinsicFor(CodeForCall) != StringPrototypeValueOfIntrinsic)
return false;
@@ -1571,7 +1266,7 @@ private:
JSObject* stringPrototypeObject = asObject(stringObjectStructure->storedPrototype());
Structure* stringPrototypeStructure = stringPrototypeObject->structure();
- if (m_graph.registerStructure(stringPrototypeStructure) != StructureRegisteredAndWatched)
+ if (!m_graph.watchpoints().isStillValid(stringPrototypeStructure->transitionWatchpointSet()))
return false;
if (stringPrototypeStructure->isDictionary())
@@ -1582,15 +1277,15 @@ private:
// (that would call toString()). We don't want the DFG to have to distinguish
// between the two, just because that seems like it would get confusing. So we
// just require both methods to be sane.
- if (!isStringPrototypeMethodSane(stringPrototypeObject, stringPrototypeStructure, vm().propertyNames->valueOf.impl()))
+ if (!isStringPrototypeMethodSane(stringPrototypeStructure, vm().propertyNames->valueOf.impl()))
return false;
- if (!isStringPrototypeMethodSane(stringPrototypeObject, stringPrototypeStructure, vm().propertyNames->toString.impl()))
+ if (!isStringPrototypeMethodSane(stringPrototypeStructure, vm().propertyNames->toString.impl()))
return false;
return true;
}
- void fixupGetAndSetLocalsInBlock(BasicBlock* block)
+ void fixupSetLocalsInBlock(BasicBlock* block)
{
if (!block)
return;
@@ -1598,49 +1293,28 @@ private:
m_block = block;
for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
Node* node = m_currentNode = block->at(m_indexInBlock);
- if (node->op() != SetLocal && node->op() != GetLocal)
+ if (node->op() != SetLocal)
continue;
VariableAccessData* variable = node->variableAccessData();
- switch (node->op()) {
- case GetLocal:
- switch (variable->flushFormat()) {
- case FlushedDouble:
- node->setResult(NodeResultDouble);
- break;
- case FlushedInt52:
- node->setResult(NodeResultInt52);
- break;
- default:
- break;
- }
+ switch (variable->flushFormat()) {
+ case FlushedJSValue:
break;
-
- case SetLocal:
- switch (variable->flushFormat()) {
- case FlushedJSValue:
- break;
- case FlushedDouble:
- fixEdge<DoubleRepUse>(node->child1());
- break;
- case FlushedInt32:
- fixEdge<Int32Use>(node->child1());
- break;
- case FlushedInt52:
- fixEdge<Int52RepUse>(node->child1());
- break;
- case FlushedCell:
- fixEdge<CellUse>(node->child1());
- break;
- case FlushedBoolean:
- fixEdge<BooleanUse>(node->child1());
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
+ case FlushedDouble:
+ fixEdge<NumberUse>(node->child1());
+ break;
+ case FlushedInt32:
+ fixEdge<Int32Use>(node->child1());
+ break;
+ case FlushedInt52:
+ fixEdge<MachineIntUse>(node->child1());
+ break;
+ case FlushedCell:
+ fixEdge<CellUse>(node->child1());
+ break;
+ case FlushedBoolean:
+ fixEdge<BooleanUse>(node->child1());
break;
-
default:
RELEASE_ASSERT_NOT_REACHED();
break;
@@ -1649,38 +1323,54 @@ private:
m_insertionSet.execute(block);
}
- Node* checkArray(ArrayMode arrayMode, const NodeOrigin& origin, Node* array, Node* index, bool (*storageCheck)(const ArrayMode&) = canCSEStorage)
+ void fixupUntypedSetLocalsInBlock(BasicBlock* block)
+ {
+ if (!block)
+ return;
+ ASSERT(block->isReachable);
+ m_block = block;
+ for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
+ Node* node = m_currentNode = block->at(m_indexInBlock);
+ if (node->op() != SetLocal)
+ continue;
+
+ if (node->child1().useKind() == UntypedUse)
+ fixEdge<UntypedUse>(node->child1());
+ }
+ m_insertionSet.execute(block);
+ }
+
+ Node* checkArray(ArrayMode arrayMode, const CodeOrigin& codeOrigin, Node* array, Node* index, bool (*storageCheck)(const ArrayMode&) = canCSEStorage)
{
ASSERT(arrayMode.isSpecific());
if (arrayMode.type() == Array::String) {
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, Check, origin, Edge(array, StringUse));
+ m_indexInBlock, SpecNone, Phantom, codeOrigin,
+ Edge(array, StringUse));
} else {
- // Note that we only need to be using a structure check if we opt for SaneChain, since
- // that needs to protect against JSArray's __proto__ being changed.
- Structure* structure = arrayMode.originalArrayStructure(m_graph, origin.semantic);
+ Structure* structure = arrayMode.originalArrayStructure(m_graph, codeOrigin);
Edge indexEdge = index ? Edge(index, Int32Use) : Edge();
-
+
if (arrayMode.doesConversion()) {
if (structure) {
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, ArrayifyToStructure, origin,
+ m_indexInBlock, SpecNone, ArrayifyToStructure, codeOrigin,
OpInfo(structure), OpInfo(arrayMode.asWord()), Edge(array, CellUse), indexEdge);
} else {
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, Arrayify, origin,
+ m_indexInBlock, SpecNone, Arrayify, codeOrigin,
OpInfo(arrayMode.asWord()), Edge(array, CellUse), indexEdge);
}
} else {
if (structure) {
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, CheckStructure, origin,
+ m_indexInBlock, SpecNone, CheckStructure, codeOrigin,
OpInfo(m_graph.addStructureSet(structure)), Edge(array, CellUse));
} else {
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, CheckArray, origin,
+ m_indexInBlock, SpecNone, CheckArray, codeOrigin,
OpInfo(arrayMode.asWord()), Edge(array, CellUse));
}
}
@@ -1691,11 +1381,11 @@ private:
if (arrayMode.usesButterfly()) {
return m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, GetButterfly, origin, Edge(array, CellUse));
+ m_indexInBlock, SpecNone, GetButterfly, codeOrigin, Edge(array, CellUse));
}
return m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, GetIndexedPropertyStorage, origin,
+ m_indexInBlock, SpecNone, GetIndexedPropertyStorage, codeOrigin,
OpInfo(arrayMode.asWord()), Edge(array, KnownCellUse));
}
@@ -1706,7 +1396,7 @@ private:
switch (node->arrayMode().type()) {
case Array::ForceExit: {
m_insertionSet.insertNode(
- m_indexInBlock, SpecNone, ForceOSRExit, node->origin);
+ m_indexInBlock, SpecNone, ForceOSRExit, node->codeOrigin);
return;
}
@@ -1719,7 +1409,7 @@ private:
return;
default: {
- Node* storage = checkArray(node->arrayMode(), node->origin, base.node(), index.node());
+ Node* storage = checkArray(node->arrayMode(), node->codeOrigin, base.node(), index.node());
if (!storage)
return;
@@ -1763,35 +1453,29 @@ private:
VariableAccessData* variable = node->variableAccessData();
switch (useKind) {
case Int32Use:
- case KnownInt32Use:
if (alwaysUnboxSimplePrimitives()
|| isInt32Speculation(variable->prediction()))
m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
break;
case NumberUse:
case RealNumberUse:
- case DoubleRepUse:
- case DoubleRepRealUse:
if (variable->doubleFormatState() == UsingDoubleFormat)
m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
break;
case BooleanUse:
- case KnownBooleanUse:
if (alwaysUnboxSimplePrimitives()
|| isBooleanSpeculation(variable->prediction()))
m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
break;
- case Int52RepUse:
+ case MachineIntUse:
if (isMachineIntSpeculation(variable->prediction()))
m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
break;
case CellUse:
case KnownCellUse:
case ObjectUse:
- case FunctionUse:
case StringUse:
case KnownStringUse:
- case SymbolUse:
case StringObjectUse:
case StringOrStringObjectUse:
if (alwaysUnboxSimplePrimitives()
@@ -1803,131 +1487,179 @@ private:
}
}
+ // Set the use kind of the edge and perform any actions that need to be done for
+ // that use kind, like inserting intermediate conversion nodes. Never call this
+ // with useKind = UntypedUse explicitly; edges have UntypedUse implicitly and any
+ // edge that survives fixup and still has UntypedUse will have this method called
+ // from observeUntypedEdge(). Also, make sure that if you do change the type of an
+ // edge, you either call fixEdge() or perform the equivalent functionality
+ // yourself. Obviously, you should have a really good reason if you do the latter.
template<UseKind useKind>
void fixEdge(Edge& edge)
{
- observeUseKindOnNode<useKind>(edge.node());
- edge.setUseKind(useKind);
- }
-
- void speculateForBarrier(Edge value)
- {
- // Currently, the DFG won't take advantage of this speculation. But, we want to do it in
- // the DFG anyway because if such a speculation would be wrong, we want to know before
- // we do an expensive compile.
-
- if (value->shouldSpeculateInt32()) {
- insertCheck<Int32Use>(m_indexInBlock, value.node());
- return;
- }
-
- if (value->shouldSpeculateBoolean()) {
- insertCheck<BooleanUse>(m_indexInBlock, value.node());
- return;
- }
-
- if (value->shouldSpeculateOther()) {
- insertCheck<OtherUse>(m_indexInBlock, value.node());
- return;
- }
+ if (isDouble(useKind)) {
+ if (edge->shouldSpeculateInt32ForArithmetic()) {
+ injectInt32ToDoubleNode(edge, useKind);
+ return;
+ }
- if (value->shouldSpeculateNumber()) {
- insertCheck<NumberUse>(m_indexInBlock, value.node());
- return;
+ if (enableInt52() && edge->shouldSpeculateMachineInt()) {
+ // Make all double uses of int52 values have an intermediate Int52ToDouble.
+ // This is for the same reason as Int52ToValue (see below) except that
+ // Int8ToDouble will convert int52's that fit in an int32 into a double
+ // rather than trying to create a boxed int32 like Int52ToValue does.
+
+ Node* result = m_insertionSet.insertNode(
+ m_indexInBlock, SpecInt52AsDouble, Int52ToDouble,
+ m_currentNode->codeOrigin, Edge(edge.node(), NumberUse));
+ edge = Edge(result, useKind);
+ return;
+ }
}
+
+ if (enableInt52() && useKind != MachineIntUse
+ && edge->shouldSpeculateMachineInt() && !edge->shouldSpeculateInt32()) {
+ // We make all non-int52 uses of int52 values have an intermediate Int52ToValue
+ // node to ensure that we handle this properly:
+ //
+ // a: SomeInt52
+ // b: ArithAdd(@a, ...)
+ // c: Call(..., @a)
+ // d: ArithAdd(@a, ...)
+ //
+ // Without an intermediate node and just labeling the uses, we will get:
+ //
+ // a: SomeInt52
+ // b: ArithAdd(Int52:@a, ...)
+ // c: Call(..., Untyped:@a)
+ // d: ArithAdd(Int52:@a, ...)
+ //
+ // And now the c->Untyped:@a edge will box the value of @a into a double. This
+ // is bad, because now the d->Int52:@a edge will either have to do double-to-int
+ // conversions, or will have to OSR exit unconditionally. Alternatively we could
+ // have the c->Untyped:@a edge box the value by copying rather than in-place.
+ // But these boxings are also costly so this wouldn't be great.
+ //
+ // The solution we use is to always have non-Int52 uses of predicted Int52's use
+ // an intervening Int52ToValue node:
+ //
+ // a: SomeInt52
+ // b: ArithAdd(Int52:@a, ...)
+ // x: Int52ToValue(Int52:@a)
+ // c: Call(..., Untyped:@x)
+ // d: ArithAdd(Int52:@a, ...)
+ //
+ // Note that even if we had multiple non-int52 uses of @a, the multiple
+ // Int52ToValue's would get CSE'd together. So the boxing would only happen once.
+ // At the same time, @a would continue to be represented as a native int52.
+ //
+ // An alternative would have been to insert ToNativeInt52 nodes on int52 uses of
+ // int52's. This would have handled the above example but would fall over for:
+ //
+ // a: SomeInt52
+ // b: Call(..., @a)
+ // c: ArithAdd(@a, ...)
+ //
+ // But the solution we use handles the above gracefully.
- if (value->shouldSpeculateNotCell()) {
- insertCheck<NotCellUse>(m_indexInBlock, value.node());
+ Node* result = m_insertionSet.insertNode(
+ m_indexInBlock, SpecInt52, Int52ToValue,
+ m_currentNode->codeOrigin, Edge(edge.node(), UntypedUse));
+ edge = Edge(result, useKind);
return;
}
+
+ observeUseKindOnNode<useKind>(edge.node());
+
+ edge.setUseKind(useKind);
}
- template<UseKind useKind>
- void insertCheck(unsigned indexInBlock, Node* node)
+ void insertStoreBarrier(unsigned indexInBlock, Edge child1, Edge child2 = Edge())
{
- observeUseKindOnNode<useKind>(node);
- m_insertionSet.insertNode(
- indexInBlock, SpecNone, Check, m_currentNode->origin, Edge(node, useKind));
+ Node* barrierNode;
+ if (!child2)
+ barrierNode = m_graph.addNode(SpecNone, StoreBarrier, m_currentNode->codeOrigin, Edge(child1.node(), child1.useKind()));
+ else {
+ barrierNode = m_graph.addNode(SpecNone, ConditionalStoreBarrier, m_currentNode->codeOrigin,
+ Edge(child1.node(), child1.useKind()), Edge(child2.node(), child2.useKind()));
+ }
+ fixupNode(barrierNode);
+ m_insertionSet.insert(indexInBlock, barrierNode);
}
- void fixIntConvertingEdge(Edge& edge)
+ bool fixIntEdge(Edge& edge)
{
Node* node = edge.node();
- if (node->shouldSpeculateInt32OrBoolean()) {
- fixIntOrBooleanEdge(edge);
- return;
+ if (node->shouldSpeculateInt32()) {
+ fixEdge<Int32Use>(edge);
+ return false;
}
UseKind useKind;
if (node->shouldSpeculateMachineInt())
- useKind = Int52RepUse;
+ useKind = MachineIntUse;
else if (node->shouldSpeculateNumber())
- useKind = DoubleRepUse;
+ useKind = NumberUse;
+ else if (node->shouldSpeculateBoolean())
+ useKind = BooleanUse;
else
useKind = NotCellUse;
Node* newNode = m_insertionSet.insertNode(
- m_indexInBlock, SpecInt32, ValueToInt32, m_currentNode->origin,
+ m_indexInBlock, SpecInt32, ValueToInt32, m_currentNode->codeOrigin,
Edge(node, useKind));
observeUseKindOnNode(node, useKind);
edge = Edge(newNode, KnownInt32Use);
+ return true;
}
- void fixIntOrBooleanEdge(Edge& edge)
+ void fixBinaryIntEdges()
{
- Node* node = edge.node();
- if (!node->sawBooleans()) {
- fixEdge<Int32Use>(edge);
- return;
- }
+ AdjacencyList children = m_currentNode->children;
- UseKind useKind;
- if (node->shouldSpeculateBoolean())
- useKind = BooleanUse;
- else
- useKind = UntypedUse;
- Node* newNode = m_insertionSet.insertNode(
- m_indexInBlock, SpecInt32, BooleanToNumber, m_currentNode->origin,
- Edge(node, useKind));
- observeUseKindOnNode(node, useKind);
+ // Call fixIntEdge() on both edges.
+ bool needPhantom =
+ fixIntEdge(m_currentNode->child1()) | fixIntEdge(m_currentNode->child2());
- edge = Edge(newNode, Int32Use);
+ if (!needPhantom)
+ return;
+ m_insertionSet.insertNode(m_indexInBlock + 1, SpecNone, Phantom, m_currentNode->codeOrigin, children);
}
- void fixDoubleOrBooleanEdge(Edge& edge)
+ void injectInt32ToDoubleNode(Edge& edge, UseKind useKind = NumberUse)
{
- Node* node = edge.node();
- if (!node->sawBooleans()) {
- fixEdge<DoubleRepUse>(edge);
- return;
- }
+ Node* result = m_insertionSet.insertNode(
+ m_indexInBlock, SpecInt52AsDouble, Int32ToDouble,
+ m_currentNode->codeOrigin, Edge(edge.node(), NumberUse));
- UseKind useKind;
- if (node->shouldSpeculateBoolean())
- useKind = BooleanUse;
- else
- useKind = UntypedUse;
- Node* newNode = m_insertionSet.insertNode(
- m_indexInBlock, SpecInt32, BooleanToNumber, m_currentNode->origin,
- Edge(node, useKind));
- observeUseKindOnNode(node, useKind);
-
- edge = Edge(newNode, DoubleRepUse);
+ edge = Edge(result, useKind);
}
void truncateConstantToInt32(Edge& edge)
{
Node* oldNode = edge.node();
- JSValue value = oldNode->asJSValue();
+ ASSERT(oldNode->hasConstant());
+ JSValue value = m_graph.valueOfJSConstant(oldNode);
if (value.isInt32())
return;
value = jsNumber(JSC::toInt32(value.asNumber()));
ASSERT(value.isInt32());
+ unsigned constantRegister;
+ if (!codeBlock()->findConstant(value, constantRegister)) {
+ constantRegister = codeBlock()->addConstantLazily();
+ initializeLazyWriteBarrierForConstant(
+ m_graph.m_plan.writeBarriers,
+ codeBlock()->constants()[constantRegister],
+ codeBlock(),
+ constantRegister,
+ codeBlock()->ownerExecutable(),
+ value);
+ }
edge.setNode(m_insertionSet.insertNode(
- m_indexInBlock, SpecInt32, JSConstant, m_currentNode->origin,
- OpInfo(m_graph.freeze(value))));
+ m_indexInBlock, SpecInt32, JSConstant, m_currentNode->codeOrigin,
+ OpInfo(constantRegister)));
}
void truncateConstantsIfNecessary(Node* node, AddSpeculationMode mode)
@@ -1944,11 +1676,11 @@ private:
bool attemptToMakeIntegerAdd(Node* node)
{
- AddSpeculationMode mode = m_graph.addSpeculationMode(node, FixupPass);
+ AddSpeculationMode mode = m_graph.addSpeculationMode(node);
if (mode != DontSpeculateInt32) {
truncateConstantsIfNecessary(node, mode);
- fixIntOrBooleanEdge(node->child1());
- fixIntOrBooleanEdge(node->child2());
+ fixEdge<Int32Use>(node->child1());
+ fixEdge<Int32Use>(node->child2());
if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
node->setArithMode(Arith::Unchecked);
else
@@ -1957,10 +1689,9 @@ private:
}
if (m_graph.addShouldSpeculateMachineInt(node)) {
- fixEdge<Int52RepUse>(node->child1());
- fixEdge<Int52RepUse>(node->child2());
+ fixEdge<MachineIntUse>(node->child1());
+ fixEdge<MachineIntUse>(node->child2());
node->setArithMode(Arith::CheckOverflow);
- node->setResult(NodeResultInt52);
return true;
}
@@ -1971,9 +1702,9 @@ private:
{
if (!isInt32Speculation(node->prediction()))
return false;
- CodeBlock* profiledBlock = m_graph.baselineCodeBlockFor(node->origin.semantic);
+ CodeBlock* profiledBlock = m_graph.baselineCodeBlockFor(node->codeOrigin);
ArrayProfile* arrayProfile =
- profiledBlock->getArrayProfile(node->origin.semantic.bytecodeIndex);
+ profiledBlock->getArrayProfile(node->codeOrigin.bytecodeIndex);
ArrayMode arrayMode = ArrayMode(Array::SelectUsingPredictions);
if (arrayProfile) {
ConcurrentJITLocker locker(profiledBlock->m_lock);
@@ -1991,8 +1722,7 @@ private:
}
}
- arrayMode = arrayMode.refine(
- m_graph, node, node->child1()->prediction(), node->prediction());
+ arrayMode = arrayMode.refine(node->child1()->prediction(), node->prediction());
if (arrayMode.type() == Array::Generic) {
// Check if the input is something that we can't get array length for, but for which we
@@ -2026,16 +1756,16 @@ private:
}
Node* length = prependGetArrayLength(
- node->origin, node->child1().node(), ArrayMode(toArrayType(type)));
+ node->codeOrigin, node->child1().node(), ArrayMode(toArrayType(type)));
Node* shiftAmount = m_insertionSet.insertNode(
- m_indexInBlock, SpecInt32, JSConstant, node->origin,
- OpInfo(m_graph.freeze(jsNumber(logElementSize(type)))));
+ m_indexInBlock, SpecInt32, JSConstant, node->codeOrigin,
+ OpInfo(m_graph.constantRegisterForConstant(jsNumber(logElementSize(type)))));
// We can use a BitLShift here because typed arrays will never have a byteLength
// that overflows int32.
node->setOp(BitLShift);
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
observeUseKindOnNode(length, Int32Use);
observeUseKindOnNode(shiftAmount, Int32Use);
node->child1() = Edge(length, Int32Use);
@@ -2046,22 +1776,22 @@ private:
void convertToGetArrayLength(Node* node, ArrayMode arrayMode)
{
node->setOp(GetArrayLength);
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
fixEdge<KnownCellUse>(node->child1());
node->setArrayMode(arrayMode);
- Node* storage = checkArray(arrayMode, node->origin, node->child1().node(), 0, lengthNeedsStorage);
+ Node* storage = checkArray(arrayMode, node->codeOrigin, node->child1().node(), 0, lengthNeedsStorage);
if (!storage)
return;
node->child2() = Edge(storage);
}
- Node* prependGetArrayLength(NodeOrigin origin, Node* child, ArrayMode arrayMode)
+ Node* prependGetArrayLength(CodeOrigin codeOrigin, Node* child, ArrayMode arrayMode)
{
- Node* storage = checkArray(arrayMode, origin, child, 0, lengthNeedsStorage);
+ Node* storage = checkArray(arrayMode, codeOrigin, child, 0, lengthNeedsStorage);
return m_insertionSet.insertNode(
- m_indexInBlock, SpecInt32, GetArrayLength, origin,
+ m_indexInBlock, SpecInt32, GetArrayLength, codeOrigin,
OpInfo(arrayMode.asWord()), Edge(child, KnownCellUse), Edge(storage));
}
@@ -2075,177 +1805,15 @@ private:
return false;
checkArray(
- ArrayMode(toArrayType(type)), node->origin, node->child1().node(),
+ ArrayMode(toArrayType(type)), node->codeOrigin, node->child1().node(),
0, neverNeedsStorage);
node->setOp(GetTypedArrayByteOffset);
- node->clearFlags(NodeMustGenerate);
+ node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
fixEdge<KnownCellUse>(node->child1());
return true;
}
-
- void injectTypeConversionsInBlock(BasicBlock* block)
- {
- if (!block)
- return;
- ASSERT(block->isReachable);
- m_block = block;
- for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
- m_currentNode = block->at(m_indexInBlock);
- tryToRelaxRepresentation(m_currentNode);
- DFG_NODE_DO_TO_CHILDREN(m_graph, m_currentNode, injectTypeConversionsForEdge);
- }
- m_insertionSet.execute(block);
- }
-
- void tryToRelaxRepresentation(Node* node)
- {
- // Some operations may be able to operate more efficiently over looser representations.
- // Identify those here. This avoids inserting a redundant representation conversion.
- // Also, for some operations, like MovHint, this is a necessary optimization: inserting
- // an otherwise-dead conversion just for a MovHint would break OSR's understanding of
- // the IR.
-
- switch (node->op()) {
- case MovHint:
- case Check:
- DFG_NODE_DO_TO_CHILDREN(m_graph, m_currentNode, fixEdgeRepresentation);
- break;
-
- case ValueToInt32:
- if (node->child1().useKind() == DoubleRepUse
- && !node->child1()->hasDoubleResult()) {
- node->child1().setUseKind(NumberUse);
- break;
- }
- break;
-
- default:
- break;
- }
- }
-
- void fixEdgeRepresentation(Node*, Edge& edge)
- {
- switch (edge.useKind()) {
- case DoubleRepUse:
- case DoubleRepRealUse:
- if (edge->hasDoubleResult())
- break;
-
- if (edge->hasInt52Result())
- edge.setUseKind(Int52RepUse);
- else if (edge.useKind() == DoubleRepUse)
- edge.setUseKind(NumberUse);
- break;
-
- case Int52RepUse:
- // Nothing we can really do.
- break;
-
- case UntypedUse:
- case NumberUse:
- if (edge->hasDoubleResult())
- edge.setUseKind(DoubleRepUse);
- else if (edge->hasInt52Result())
- edge.setUseKind(Int52RepUse);
- break;
-
- case RealNumberUse:
- if (edge->hasDoubleResult())
- edge.setUseKind(DoubleRepRealUse);
- else if (edge->hasInt52Result())
- edge.setUseKind(Int52RepUse);
- break;
-
- default:
- break;
- }
- }
-
- void injectTypeConversionsForEdge(Node* node, Edge& edge)
- {
- ASSERT(node == m_currentNode);
- Node* result = nullptr;
-
- switch (edge.useKind()) {
- case DoubleRepUse:
- case DoubleRepRealUse:
- case DoubleRepMachineIntUse: {
- if (edge->hasDoubleResult())
- break;
-
- if (edge->isNumberConstant()) {
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecBytecodeDouble, DoubleConstant, node->origin,
- OpInfo(m_graph.freeze(jsDoubleNumber(edge->asNumber()))));
- } else if (edge->hasInt52Result()) {
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecInt52AsDouble, DoubleRep, node->origin,
- Edge(edge.node(), Int52RepUse));
- } else {
- UseKind useKind;
- if (edge->shouldSpeculateDoubleReal())
- useKind = RealNumberUse;
- else if (edge->shouldSpeculateNumber())
- useKind = NumberUse;
- else
- useKind = NotCellUse;
-
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecBytecodeDouble, DoubleRep, node->origin,
- Edge(edge.node(), useKind));
- }
-
- edge.setNode(result);
- break;
- }
-
- case Int52RepUse: {
- if (edge->hasInt52Result())
- break;
-
- if (edge->isMachineIntConstant()) {
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecMachineInt, Int52Constant, node->origin,
- OpInfo(edge->constant()));
- } else if (edge->hasDoubleResult()) {
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecMachineInt, Int52Rep, node->origin,
- Edge(edge.node(), DoubleRepMachineIntUse));
- } else if (edge->shouldSpeculateInt32ForArithmetic()) {
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecInt32, Int52Rep, node->origin,
- Edge(edge.node(), Int32Use));
- } else {
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecMachineInt, Int52Rep, node->origin,
- Edge(edge.node(), MachineIntUse));
- }
- edge.setNode(result);
- break;
- }
-
- default: {
- if (!edge->hasDoubleResult() && !edge->hasInt52Result())
- break;
-
- if (edge->hasDoubleResult()) {
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecBytecodeDouble, ValueRep, node->origin,
- Edge(edge.node(), DoubleRepUse));
- } else {
- result = m_insertionSet.insertNode(
- m_indexInBlock, SpecInt32 | SpecInt52AsDouble, ValueRep, node->origin,
- Edge(edge.node(), Int52RepUse));
- }
-
- edge.setNode(result);
- break;
- } }
- }
-
BasicBlock* m_block;
unsigned m_indexInBlock;
Node* m_currentNode;
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.h b/Source/JavaScriptCore/dfg/DFGFixupPhase.h
index 55f84a9f4..d496d59b2 100644
--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGFixupPhase_h
#define DFGFixupPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGFlushFormat.cpp b/Source/JavaScriptCore/dfg/DFGFlushFormat.cpp
index fa483ac6c..fd6c249e6 100644
--- a/Source/JavaScriptCore/dfg/DFGFlushFormat.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFlushFormat.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace WTF {
using namespace JSC::DFG;
@@ -58,6 +56,9 @@ void printInternal(PrintStream& out, FlushFormat format)
case FlushedJSValue:
out.print("FlushedJSValue");
return;
+ case FlushedArguments:
+ out.print("FlushedArguments");
+ return;
case ConflictingFlush:
out.print("ConflictingFlush");
return;
diff --git a/Source/JavaScriptCore/dfg/DFGFlushFormat.h b/Source/JavaScriptCore/dfg/DFGFlushFormat.h
index 480944ba6..9083a107e 100644
--- a/Source/JavaScriptCore/dfg/DFGFlushFormat.h
+++ b/Source/JavaScriptCore/dfg/DFGFlushFormat.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGFlushFormat_h
#define DFGFlushFormat_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGNodeFlags.h"
@@ -44,6 +46,7 @@ enum FlushFormat {
FlushedCell,
FlushedBoolean,
FlushedJSValue,
+ FlushedArguments,
ConflictingFlush
};
@@ -54,13 +57,14 @@ inline NodeFlags resultFor(FlushFormat format)
case FlushedJSValue:
case FlushedCell:
case ConflictingFlush:
+ case FlushedArguments:
return NodeResultJS;
case FlushedInt32:
return NodeResultInt32;
case FlushedInt52:
return NodeResultInt52;
case FlushedDouble:
- return NodeResultDouble;
+ return NodeResultNumber;
case FlushedBoolean:
return NodeResultBoolean;
}
@@ -74,15 +78,16 @@ inline UseKind useKindFor(FlushFormat format)
case DeadFlush:
case FlushedJSValue:
case ConflictingFlush:
+ case FlushedArguments:
return UntypedUse;
case FlushedCell:
return CellUse;
case FlushedInt32:
return Int32Use;
case FlushedInt52:
- return Int52RepUse;
+ return MachineIntUse;
case FlushedDouble:
- return DoubleRepUse;
+ return NumberUse;
case FlushedBoolean:
return BooleanUse;
}
@@ -90,11 +95,6 @@ inline UseKind useKindFor(FlushFormat format)
return UntypedUse;
}
-inline SpeculatedType typeFilterFor(FlushFormat format)
-{
- return typeFilterFor(useKindFor(format));
-}
-
inline DataFormat dataFormatFor(FlushFormat format)
{
switch (format) {
@@ -113,27 +113,13 @@ inline DataFormat dataFormatFor(FlushFormat format)
return DataFormatCell;
case FlushedBoolean:
return DataFormatBoolean;
+ case FlushedArguments:
+ return DataFormatArguments;
}
RELEASE_ASSERT_NOT_REACHED();
return DataFormatDead;
}
-inline FlushFormat merge(FlushFormat a, FlushFormat b)
-{
- if (a == DeadFlush)
- return b;
- if (b == DeadFlush)
- return a;
- if (a == b)
- return a;
- return ConflictingFlush;
-}
-
-inline bool isConcrete(FlushFormat format)
-{
- return format != DeadFlush && format != ConflictingFlush;
-}
-
} } // namespace JSC::DFG
namespace WTF {
diff --git a/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.cpp b/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.cpp
new file mode 100644
index 000000000..c4db38268
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGFlushLivenessAnalysisPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGBasicBlockInlines.h"
+#include "DFGGraph.h"
+#include "DFGInsertionSet.h"
+#include "DFGPhase.h"
+#include "OperandsInlines.h"
+#include "Operations.h"
+
+namespace JSC { namespace DFG {
+
+class FlushLivenessAnalysisPhase : public Phase {
+public:
+ FlushLivenessAnalysisPhase(Graph& graph)
+ : Phase(graph, "flush-liveness analysis")
+ {
+ }
+
+ bool run()
+ {
+ ASSERT(m_graph.m_form == SSA);
+
+ // Liveness is a backwards analysis; the roots are the blocks that
+ // end in a terminal (Return/Unreachable). For now, we
+ // use a fixpoint formulation since liveness is a rapid analysis with
+ // convergence guaranteed after O(connectivity).
+
+ // Start by assuming that everything is dead.
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ block->ssa->flushAtHead.fill(FlushedAt());
+ block->ssa->flushAtTail.fill(FlushedAt());
+ }
+
+ do {
+ m_changed = false;
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;)
+ process(blockIndex);
+ } while (m_changed);
+
+ Operands<FlushedAt>& root = m_graph.block(0)->ssa->flushAtHead;
+ for (unsigned i = root.size(); i--;) {
+ if (root.isArgument(i)) {
+ if (!root[i]
+ || root[i] == FlushedAt(FlushedJSValue, VirtualRegister(root.operandForIndex(i))))
+ continue;
+ } else {
+ if (!root[i])
+ continue;
+ }
+ dataLog(
+ "Bad flush liveness analysis result: bad flush liveness at root: ",
+ root, "\n");
+ dataLog("IR at time of error:\n");
+ m_graph.dump();
+ CRASH();
+ }
+
+ return true;
+ }
+
+private:
+ void process(BlockIndex blockIndex)
+ {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ return;
+
+ m_live = block->ssa->flushAtTail;
+
+ for (unsigned nodeIndex = block->size(); nodeIndex--;) {
+ Node* node = block->at(nodeIndex);
+
+ switch (node->op()) {
+ case SetLocal: {
+ VariableAccessData* variable = node->variableAccessData();
+ FlushedAt& current = m_live.operand(variable->local());
+ if (!!current && current != variable->flushedAt())
+ reportError(node);
+ current = FlushedAt();
+ break;
+ }
+
+ case GetArgument: {
+ VariableAccessData* variable = node->variableAccessData();
+ ASSERT(variable->local() == variable->machineLocal());
+ ASSERT(variable->local().isArgument());
+ FlushedAt& current = m_live.operand(variable->local());
+ if (!!current && current != variable->flushedAt())
+ reportError(node);
+ current = FlushedAt(FlushedJSValue, node->local());
+ break;
+ }
+
+ case Flush:
+ case GetLocal: {
+ VariableAccessData* variable = node->variableAccessData();
+ FlushedAt& current = m_live.operand(variable->local());
+ if (!!current && current != variable->flushedAt())
+ reportError(node);
+ current = variable->flushedAt();
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ if (m_live == block->ssa->flushAtHead)
+ return;
+
+ m_changed = true;
+ block->ssa->flushAtHead = m_live;
+ for (unsigned i = block->predecessors.size(); i--;) {
+ BasicBlock* predecessor = block->predecessors[i];
+ for (unsigned j = m_live.size(); j--;) {
+ FlushedAt& predecessorFlush = predecessor->ssa->flushAtTail[j];
+ FlushedAt myFlush = m_live[j];
+
+ // Three possibilities:
+ // 1) Predecessor format is Dead, in which case it acquires our format.
+ // 2) Predecessor format is not Dead but our format is dead, in which
+ // case we acquire the predecessor format.
+ // 3) Predecessor format is identical to our format, in which case we
+ // do nothing.
+ // 4) Predecessor format is different from our format and it's not Dead,
+ // in which case we have an erroneous set of Flushes and SetLocals.
+
+ if (!predecessorFlush) {
+ predecessorFlush = myFlush;
+ continue;
+ }
+
+ if (!myFlush) {
+ m_live[j] = predecessorFlush;
+ continue;
+ }
+
+ if (predecessorFlush == myFlush)
+ continue;
+
+ dataLog(
+ "Bad Flush merge at edge ", *predecessor, " -> ", *block,
+ ", local variable r", m_live.operandForIndex(j), ": ", *predecessor,
+ " has ", predecessorFlush, " and ", *block, " has ", myFlush, ".\n");
+ dataLog("IR at time of error:\n");
+ m_graph.dump();
+ CRASH();
+ }
+ }
+ }
+
+ NO_RETURN_DUE_TO_CRASH void reportError(Node* node)
+ {
+ dataLog(
+ "Bad flush merge at node ", node, ", r", node->local(), ": node claims ",
+ node->variableAccessData()->flushedAt(), " but backwards flow claims ",
+ m_live.operand(node->local()), ".\n");
+ dataLog("IR at time of error:\n");
+ m_graph.dump();
+ CRASH();
+ }
+
+ bool m_changed;
+ Operands<FlushedAt> m_live;
+};
+
+bool performFlushLivenessAnalysis(Graph& graph)
+{
+ SamplingRegion samplingRegion("DFG Flush-Liveness Analysis Phase");
+ return runPhase<FlushLivenessAnalysisPhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.h b/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.h
index 6abec0355..4d7b3c429 100644
--- a/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,22 +23,26 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGIntegerCheckCombiningPhase_h
-#define DFGIntegerCheckCombiningPhase_h
+#ifndef DFGFlushLivenessAnalysisPhase_h
+#define DFGFlushLivenessAnalysisPhase_h
+
+#include <wtf/Platform.h>
#if ENABLE(DFG_JIT)
+#include "DFGCommon.h"
+
namespace JSC { namespace DFG {
class Graph;
-// Removes overflow checks and out-of-bounds checks by hoisting them.
+// Computes BasicBlock::ssa->flushFormatAtHead
-bool performIntegerCheckCombining(Graph&);
+bool performFlushLivenessAnalysis(Graph&);
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
-#endif // DFGIntegerCheckCombiningPhase_h
+#endif // DFGFlushLivenessAnalysisPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGFlushedAt.cpp b/Source/JavaScriptCore/dfg/DFGFlushedAt.cpp
index c15a2e6b0..ce95f45d5 100644
--- a/Source/JavaScriptCore/dfg/DFGFlushedAt.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFlushedAt.cpp
@@ -28,18 +28,14 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace JSC { namespace DFG {
void FlushedAt::dump(PrintStream& out) const
{
if (m_format == DeadFlush || m_format == ConflictingFlush)
out.print(m_format);
- else if (m_virtualRegister.isValid())
- out.print(m_virtualRegister, ":", m_format);
else
- out.print(m_format);
+ out.print("r", m_virtualRegister, ":", m_format);
}
void FlushedAt::dumpInContext(PrintStream& out, DumpContext*) const
diff --git a/Source/JavaScriptCore/dfg/DFGFlushedAt.h b/Source/JavaScriptCore/dfg/DFGFlushedAt.h
index ea913dd51..6dfe716cc 100644
--- a/Source/JavaScriptCore/dfg/DFGFlushedAt.h
+++ b/Source/JavaScriptCore/dfg/DFGFlushedAt.h
@@ -26,6 +26,8 @@
#ifndef DFGFlushedAt_h
#define DFGFlushedAt_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGFlushFormat.h"
@@ -52,6 +54,8 @@ public:
{
if (format == DeadFlush)
ASSERT(!virtualRegister.isValid());
+ else
+ ASSERT(virtualRegister.isValid());
}
bool operator!() const { return m_format == DeadFlush; }
diff --git a/Source/JavaScriptCore/dfg/DFGForAllKills.h b/Source/JavaScriptCore/dfg/DFGForAllKills.h
deleted file mode 100644
index bb630cd44..000000000
--- a/Source/JavaScriptCore/dfg/DFGForAllKills.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGForAllKills_h
-#define DFGForAllKills_h
-
-#include "DFGCombinedLiveness.h"
-#include "DFGGraph.h"
-#include "DFGOSRAvailabilityAnalysisPhase.h"
-#include "FullBytecodeLiveness.h"
-
-namespace JSC { namespace DFG {
-
-// Utilities for finding the last points where a node is live in DFG SSA. This accounts for liveness due
-// to OSR exit. This is usually used for enumerating over all of the program points where a node is live,
-// by exploring all blocks where the node is live at tail and then exploring all program points where the
-// node is killed. A prerequisite to using these utilities is having liveness and OSR availability
-// computed.
-
-// This tells you those things that die on the boundary between nodeBefore and nodeAfter. It is
-// conservative in the sense that it might resort to telling you some things that are still live at
-// nodeAfter.
-template<typename Functor>
-void forAllKilledOperands(Graph& graph, Node* nodeBefore, Node* nodeAfter, const Functor& functor)
-{
- CodeOrigin before = nodeBefore->origin.forExit;
-
- if (!nodeAfter) {
- graph.forAllLiveInBytecode(before, functor);
- return;
- }
-
- CodeOrigin after = nodeAfter->origin.forExit;
-
- VirtualRegister alreadyNoted;
- if (!!after) {
- // If we MovHint something that is live at the time, then we kill the old value.
- if (nodeAfter->containsMovHint()) {
- VirtualRegister reg = nodeAfter->unlinkedLocal();
- if (graph.isLiveInBytecode(reg, after)) {
- functor(reg);
- alreadyNoted = reg;
- }
- }
- }
-
- if (!before) {
- if (!after)
- return;
- // The true before-origin is the origin at predecessors that jump to us. But there can be
- // many such predecessors and they will likely all have a different origin. So, it's better
- // to do the conservative thing.
- graph.forAllLocalsLiveInBytecode(after, functor);
- return;
- }
-
- if (before == after)
- return;
-
- // before could be unset even if after is, but the opposite cannot happen.
- ASSERT(!!after);
-
- // It's easier to do this if the inline call frames are the same. This is way faster than the
- // other loop, below.
- if (before.inlineCallFrame == after.inlineCallFrame) {
- int stackOffset = before.inlineCallFrame ? before.inlineCallFrame->stackOffset : 0;
- CodeBlock* codeBlock = graph.baselineCodeBlockFor(before.inlineCallFrame);
- FullBytecodeLiveness& fullLiveness = graph.livenessFor(codeBlock);
- const FastBitVector& liveBefore = fullLiveness.getLiveness(before.bytecodeIndex);
- const FastBitVector& liveAfter = fullLiveness.getLiveness(after.bytecodeIndex);
-
- for (unsigned relativeLocal = codeBlock->m_numCalleeRegisters; relativeLocal--;) {
- if (liveBefore.get(relativeLocal) && !liveAfter.get(relativeLocal))
- functor(virtualRegisterForLocal(relativeLocal) + stackOffset);
- }
-
- return;
- }
-
- // Detect kills the super conservative way: it is killed if it was live before and dead after.
- BitVector liveAfter = graph.localsLiveInBytecode(after);
- graph.forAllLocalsLiveInBytecode(
- before,
- [&] (VirtualRegister reg) {
- if (reg == alreadyNoted)
- return;
- if (liveAfter.get(reg.toLocal()))
- return;
- functor(reg);
- });
-}
-
-// Tells you all of the nodes that would no longer be live across the node at this nodeIndex.
-template<typename Functor>
-void forAllKilledNodesAtNodeIndex(
- Graph& graph, AvailabilityMap& availabilityMap, BasicBlock* block, unsigned nodeIndex,
- const Functor& functor)
-{
- static const unsigned seenInClosureFlag = 1;
- static const unsigned calledFunctorFlag = 2;
- HashMap<Node*, unsigned> flags;
-
- Node* node = block->at(nodeIndex);
-
- graph.doToChildren(
- node,
- [&] (Edge edge) {
- if (edge.doesKill()) {
- auto& result = flags.add(edge.node(), 0).iterator->value;
- if (!(result & calledFunctorFlag)) {
- functor(edge.node());
- result |= calledFunctorFlag;
- }
- }
- });
-
- Node* before = nullptr;
- if (nodeIndex)
- before = block->at(nodeIndex - 1);
-
- forAllKilledOperands(
- graph, before, node,
- [&] (VirtualRegister reg) {
- availabilityMap.closeStartingWithLocal(
- reg,
- [&] (Node* node) -> bool {
- return flags.get(node) & seenInClosureFlag;
- },
- [&] (Node* node) -> bool {
- auto& resultFlags = flags.add(node, 0).iterator->value;
- bool result = resultFlags & seenInClosureFlag;
- if (!(resultFlags & calledFunctorFlag))
- functor(node);
- resultFlags |= seenInClosureFlag | calledFunctorFlag;
- return result;
- });
- });
-}
-
-// Tells you all of the places to start searching from in a basic block. Gives you the node index at which
-// the value is either no longer live. This pretends that nodes are dead at the end of the block, so that
-// you can use this to do per-basic-block analyses.
-template<typename Functor>
-void forAllKillsInBlock(
- Graph& graph, const CombinedLiveness& combinedLiveness, BasicBlock* block,
- const Functor& functor)
-{
- for (Node* node : combinedLiveness.liveAtTail[block])
- functor(block->size(), node);
-
- LocalOSRAvailabilityCalculator localAvailability;
- localAvailability.beginBlock(block);
- // Start at the second node, because the functor is expected to only inspect nodes from the start of
- // the block up to nodeIndex (exclusive), so if nodeIndex is zero then the functor has nothing to do.
- for (unsigned nodeIndex = 1; nodeIndex < block->size(); ++nodeIndex) {
- forAllKilledNodesAtNodeIndex(
- graph, localAvailability.m_availability, block, nodeIndex,
- [&] (Node* node) {
- functor(nodeIndex, node);
- });
- localAvailability.executeNode(block->at(nodeIndex));
- }
-}
-
-} } // namespace JSC::DFG
-
-#endif // DFGForAllKills_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGFrozenValue.cpp b/Source/JavaScriptCore/dfg/DFGFrozenValue.cpp
deleted file mode 100644
index a62c38dde..000000000
--- a/Source/JavaScriptCore/dfg/DFGFrozenValue.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGFrozenValue.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-FrozenValue* FrozenValue::emptySingleton()
-{
- static FrozenValue empty;
- return &empty;
-}
-
-void FrozenValue::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- if (!!m_value && m_value.isCell())
- out.print(m_strength, ":");
- m_value.dumpInContextAssumingStructure(out, context, m_structure);
-}
-
-void FrozenValue::dump(PrintStream& out) const
-{
- dumpInContext(out, 0);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGFrozenValue.h b/Source/JavaScriptCore/dfg/DFGFrozenValue.h
deleted file mode 100644
index 094356fcc..000000000
--- a/Source/JavaScriptCore/dfg/DFGFrozenValue.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGFrozenValue_h
-#define DFGFrozenValue_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGValueStrength.h"
-#include "JSCell.h"
-#include "JSCJSValue.h"
-#include "Structure.h"
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-class FrozenValue {
-public:
- FrozenValue()
- : m_structure(nullptr)
- , m_strength(WeakValue)
- {
- }
-
- FrozenValue(JSValue value)
- : m_value(value)
- , m_structure(nullptr)
- , m_strength(WeakValue)
- {
- RELEASE_ASSERT(!value || !value.isCell());
- }
-
- FrozenValue(JSValue value, Structure* structure, ValueStrength strength)
- : m_value(value)
- , m_structure(structure)
- , m_strength(strength)
- {
- ASSERT((!!value && value.isCell()) == !!structure);
- ASSERT(!value || !value.isCell() || value.asCell()->classInfo() == structure->classInfo());
- ASSERT(!!structure || (strength == WeakValue));
- }
-
- static FrozenValue* emptySingleton();
-
- bool operator!() const { return !m_value; }
-
- JSValue value() const { return m_value; }
- JSCell* cell() const { return m_value.asCell(); }
-
- template<typename T>
- T dynamicCast()
- {
- return jsDynamicCast<T>(value());
- }
- template<typename T>
- T cast()
- {
- return jsCast<T>(value());
- }
-
- Structure* structure() const { return m_structure; }
-
- void strengthenTo(ValueStrength strength)
- {
- if (!!m_value && m_value.isCell())
- m_strength = merge(m_strength, strength);
- }
-
- bool pointsToHeap() const { return !!value() && value().isCell(); }
-
- // The strength of the value itself. The structure is almost always weak.
- ValueStrength strength() const { return m_strength; }
-
- void dumpInContext(PrintStream& out, DumpContext* context) const;
- void dump(PrintStream& out) const;
-
-private:
- friend class Graph;
-
- // This is a utility method for DFG::Graph::freeze(). You should almost always call
- // Graph::freeze() directly. Calling this instead of Graph::freeze() can result in
- // the same constant being viewed as having different structures during the course
- // of compilation, which can sometimes cause bad things to happen. For example, we
- // may observe that one version of the constant has an unwatchable structure but
- // then a later version may start to have a watchable structure due to a transition.
- // The point of freezing is to ensure that we generally only see one version of
- // constants, but that requires freezing through the Graph.
- static FrozenValue freeze(JSValue value)
- {
- return FrozenValue(
- value,
- (!!value && value.isCell()) ? value.asCell()->structure() : nullptr,
- WeakValue);
- }
-
- JSValue m_value;
- Structure* m_structure;
- ValueStrength m_strength;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGFrozenValue_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.cpp b/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.cpp
deleted file mode 100644
index 57dc109d4..000000000
--- a/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGFunctionWhitelist.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "CodeBlock.h"
-#include "Options.h"
-#include <stdio.h>
-#include <string.h>
-#include <wtf/NeverDestroyed.h>
-#include <wtf/text/StringBuilder.h>
-
-namespace JSC { namespace DFG {
-
-FunctionWhitelist& FunctionWhitelist::ensureGlobalWhitelist()
-{
- static LazyNeverDestroyed<FunctionWhitelist> functionWhitelist;
- static std::once_flag initializeWhitelistFlag;
- std::call_once(initializeWhitelistFlag, [] {
- const char* functionWhitelistFile = Options::dfgWhitelist();
- functionWhitelist.construct(functionWhitelistFile);
- });
- return functionWhitelist;
-}
-
-FunctionWhitelist::FunctionWhitelist(const char* filename)
-{
- parseFunctionNamesInFile(filename);
-}
-
-void FunctionWhitelist::parseFunctionNamesInFile(const char* filename)
-{
- if (!filename)
- return;
-
- FILE* f = fopen(filename, "r");
- if (!f) {
- dataLogF("Failed to open file %s. Did you add the file-read-data entitlement to WebProcess.sb?\n", filename);
- return;
- }
-
- char* line;
- char buffer[BUFSIZ];
- while ((line = fgets(buffer, sizeof(buffer), f))) {
- if (strstr(line, "//") == line)
- continue;
-
- // Get rid of newlines at the ends of the strings.
- size_t length = strlen(line);
- if (line[length - 1] == '\n') {
- line[length - 1] = '\0';
- length--;
- }
-
- // Skip empty lines.
- if (!length)
- continue;
-
- m_entries.add(String(line, length));
- }
-
- int result = fclose(f);
- if (result)
- dataLogF("Failed to close file %s: %s\n", filename, strerror(errno));
-}
-
-bool FunctionWhitelist::contains(CodeBlock* codeBlock) const
-{
- ASSERT(!isCompilationThread());
- if (!Options::dfgWhitelist())
- return true;
-
- if (m_entries.isEmpty())
- return false;
-
- String name = String::fromUTF8(codeBlock->inferredName());
- if (m_entries.contains(name))
- return true;
-
- String hash = String::fromUTF8(codeBlock->hashAsStringIfPossible());
- if (m_entries.contains(hash))
- return true;
-
- return m_entries.contains(name + '#' + hash);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
index b30f3508d..e3330fa3b 100644
--- a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
+++ b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
@@ -38,9 +38,9 @@ namespace JSC { namespace DFG {
// === GenerationInfo ===
//
-// This class is used to track the current status of live values during code generation.
+// This class is used to track the current status of a live values during code generation.
// Can provide information as to whether a value is in machine registers, and if so which,
-// whether a value has been spilled to the RegisterFile, and if so may be able to provide
+// whether a value has been spilled to the RegsiterFile, and if so may be able to provide
// details of the format in memory (all values are spilled in a boxed form, but we may be
// able to track the type of box), and tracks how many outstanding uses of a value remain,
// so that we know when the value is dead and the machine registers associated with it
@@ -153,6 +153,8 @@ public:
void noticeOSRBirth(VariableEventStream& stream, Node* node, VirtualRegister virtualRegister)
{
+ if (m_isConstant)
+ return;
if (m_node != node)
return;
if (!alive())
@@ -162,9 +164,7 @@ public:
m_bornForOSR = true;
- if (m_isConstant)
- appendBirth(stream);
- else if (m_registerFormat != DataFormatNone)
+ if (m_registerFormat != DataFormatNone)
appendFill(BirthToFill, stream);
else if (m_spillFormat != DataFormatNone)
appendSpill(BirthToSpill, stream, virtualRegister);
@@ -379,11 +379,6 @@ public:
}
private:
- void appendBirth(VariableEventStream& stream)
- {
- stream.appendAndLog(VariableEvent::birth(MinifiedID(m_node)));
- }
-
void appendFill(VariableEventKind kind, VariableEventStream& stream)
{
ASSERT(m_bornForOSR);
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp
index 4d013be79..8256007e9 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.cpp
+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,27 +26,23 @@
#include "config.h"
#include "DFGGraph.h"
-#if ENABLE(DFG_JIT)
-
-#include "BytecodeKills.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
-#include "DFGBlockWorklist.h"
#include "DFGClobberSet.h"
#include "DFGJITCode.h"
#include "DFGVariableAccessDataDump.h"
#include "FullBytecodeLiveness.h"
#include "FunctionExecutableDump.h"
#include "JIT.h"
-#include "JSLexicalEnvironment.h"
-#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSActivation.h"
#include "OperandsInlines.h"
-#include "JSCInlines.h"
-#include "StackAlignment.h"
+#include "Operations.h"
#include <wtf/CommaPrinter.h>
#include <wtf/ListDump.h>
+#if ENABLE(DFG_JIT)
+
namespace JSC { namespace DFG {
// Creates an array of stringized names.
@@ -62,31 +58,24 @@ Graph::Graph(VM& vm, Plan& plan, LongLivedState& longLivedState)
, m_codeBlock(m_plan.codeBlock.get())
, m_profiledBlock(m_codeBlock->alternative())
, m_allocator(longLivedState.m_allocator)
+ , m_mustHandleAbstractValues(OperandsLike, plan.mustHandleValues)
+ , m_inlineCallFrames(adoptPtr(new InlineCallFrameSet()))
+ , m_hasArguments(false)
, m_nextMachineLocal(0)
+ , m_machineCaptureStart(std::numeric_limits<int>::max())
, m_fixpointState(BeforeFixpoint)
- , m_structureRegistrationState(HaveNotStartedRegistering)
, m_form(LoadStore)
, m_unificationState(LocallyUnified)
, m_refCountState(EverythingIsLive)
{
ASSERT(m_profiledBlock);
- m_hasDebuggerEnabled = m_profiledBlock->globalObject()->hasDebugger()
- || Options::forceDebuggerBytecodeGeneration();
+ for (unsigned i = m_mustHandleAbstractValues.size(); i--;)
+ m_mustHandleAbstractValues[i].setMostSpecific(*this, plan.mustHandleValues[i]);
}
Graph::~Graph()
{
- for (BlockIndex blockIndex = numBlocks(); blockIndex--;) {
- BasicBlock* block = this->block(blockIndex);
- if (!block)
- continue;
-
- for (unsigned phiIndex = block->phis.size(); phiIndex--;)
- m_allocator.free(block->phis[phiIndex]);
- for (unsigned nodeIndex = block->size(); nodeIndex--;)
- m_allocator.free(block->at(nodeIndex));
- }
m_allocator.freeAll();
}
@@ -106,11 +95,11 @@ bool Graph::dumpCodeOrigin(PrintStream& out, const char* prefix, Node* previousN
if (!previousNode)
return false;
- if (previousNode->origin.semantic.inlineCallFrame == currentNode->origin.semantic.inlineCallFrame)
+ if (previousNode->codeOrigin.inlineCallFrame == currentNode->codeOrigin.inlineCallFrame)
return false;
- Vector<CodeOrigin> previousInlineStack = previousNode->origin.semantic.inlineStack();
- Vector<CodeOrigin> currentInlineStack = currentNode->origin.semantic.inlineStack();
+ Vector<CodeOrigin> previousInlineStack = previousNode->codeOrigin.inlineStack();
+ Vector<CodeOrigin> currentInlineStack = currentNode->codeOrigin.inlineStack();
unsigned commonSize = std::min(previousInlineStack.size(), currentInlineStack.size());
unsigned indexOfDivergence = commonSize;
for (unsigned i = 0; i < commonSize; ++i) {
@@ -143,7 +132,7 @@ bool Graph::dumpCodeOrigin(PrintStream& out, const char* prefix, Node* previousN
int Graph::amountOfNodeWhiteSpace(Node* node)
{
- return (node->origin.semantic.inlineDepth() - 1) * 2;
+ return (node->codeOrigin.inlineDepth() - 1) * 2;
}
void Graph::printNodeWhiteSpace(PrintStream& out, Node* node)
@@ -156,6 +145,7 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext*
NodeType op = node->op();
unsigned refCount = node->refCount();
+ bool skipped = !refCount;
bool mustGenerate = node->mustGenerate();
if (mustGenerate)
--refCount;
@@ -177,10 +167,11 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext*
// (5) The arguments to the operation. The may be of the form:
// @# - a NodeIndex referencing a prior node in the graph.
// arg# - an argument number.
+ // $# - the index in the CodeBlock of a constant { for numeric constants the value is displayed | for integers, in both decimal and hex }.
// id# - the index in the CodeBlock of an identifier { if codeBlock is passed to dump(), the string representation is displayed }.
// var# - the index of a var on the global object, used by GetGlobalVar/PutGlobalVar operations.
- out.printf("% 4d:<%c%u:", (int)node->index(), mustGenerate ? '!' : ' ', refCount);
- if (node->hasResult() && node->hasVirtualRegister() && node->virtualRegister().isValid())
+ out.printf("% 4d:%s<%c%u:", (int)node->index(), skipped ? " skipped " : " ", mustGenerate ? '!' : ' ', refCount);
+ if (node->hasResult() && !skipped && node->hasVirtualRegister())
out.print(node->virtualRegister());
else
out.print("-");
@@ -209,94 +200,88 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext*
out.print(comma, node->arrayMode());
if (node->hasArithMode())
out.print(comma, node->arithMode());
- if (node->hasScopeOffset())
- out.print(comma, node->scopeOffset());
- if (node->hasDirectArgumentsOffset())
- out.print(comma, node->capturedArgumentsOffset());
+ if (node->hasVarNumber())
+ out.print(comma, node->varNumber());
if (node->hasRegisterPointer())
- out.print(comma, "global", globalObjectFor(node->origin.semantic)->findVariableIndex(node->variablePointer()), "(", RawPointer(node->variablePointer()), ")");
+ out.print(comma, "global", globalObjectFor(node->codeOrigin)->findRegisterIndex(node->registerPointer()), "(", RawPointer(node->registerPointer()), ")");
if (node->hasIdentifier())
out.print(comma, "id", node->identifierNumber(), "{", identifiers()[node->identifierNumber()], "}");
- if (node->hasPromotedLocationDescriptor())
- out.print(comma, node->promotedLocationDescriptor());
if (node->hasStructureSet())
out.print(comma, inContext(node->structureSet(), context));
if (node->hasStructure())
out.print(comma, inContext(*node->structure(), context));
- if (node->hasTransition()) {
- out.print(comma, pointerDumpInContext(node->transition(), context));
-#if USE(JSVALUE64)
- out.print(", ID:", node->transition()->next->id());
-#else
- out.print(", ID:", RawPointer(node->transition()->next));
-#endif
+ if (node->hasStructureTransitionData())
+ out.print(comma, inContext(*node->structureTransitionData().previousStructure, context), " -> ", inContext(*node->structureTransitionData().newStructure, context));
+ if (node->hasFunction()) {
+ out.print(comma, "function(", RawPointer(node->function()), ", ");
+ if (node->function()->inherits(JSFunction::info())) {
+ JSFunction* function = jsCast<JSFunction*>(node->function());
+ if (function->isHostFunction())
+ out.print("<host function>");
+ else
+ out.print(FunctionExecutableDump(function->jsExecutable()));
+ } else
+ out.print("<not JSFunction>");
+ out.print(")");
}
- if (node->hasCellOperand()) {
- if (!node->cellOperand()->value() || !node->cellOperand()->value().isCell())
- out.print(comma, "invalid cell operand: ", node->cellOperand()->value());
- else {
- out.print(comma, pointerDump(node->cellOperand()->value().asCell()));
- if (node->cellOperand()->value().isCell()) {
- CallVariant variant(node->cellOperand()->value().asCell());
- if (ExecutableBase* executable = variant.executable()) {
- if (executable->isHostFunction())
- out.print(comma, "<host function>");
- else if (FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(executable))
- out.print(comma, FunctionExecutableDump(functionExecutable));
- else
- out.print(comma, "<non-function executable>");
- }
- }
- }
+ if (node->hasExecutable()) {
+ if (node->executable()->inherits(FunctionExecutable::info()))
+ out.print(comma, "executable(", FunctionExecutableDump(jsCast<FunctionExecutable*>(node->executable())), ")");
+ else
+ out.print(comma, "executable(not function: ", RawPointer(node->executable()), ")");
+ }
+ if (node->hasFunctionDeclIndex()) {
+ FunctionExecutable* executable = m_codeBlock->functionDecl(node->functionDeclIndex());
+ out.print(comma, FunctionExecutableDump(executable));
+ }
+ if (node->hasFunctionExprIndex()) {
+ FunctionExecutable* executable = m_codeBlock->functionExpr(node->functionExprIndex());
+ out.print(comma, FunctionExecutableDump(executable));
}
if (node->hasStorageAccessData()) {
- StorageAccessData& storageAccessData = node->storageAccessData();
+ StorageAccessData& storageAccessData = m_storageAccessData[node->storageAccessDataIndex()];
out.print(comma, "id", storageAccessData.identifierNumber, "{", identifiers()[storageAccessData.identifierNumber], "}");
out.print(", ", static_cast<ptrdiff_t>(storageAccessData.offset));
}
- if (node->hasMultiGetByOffsetData()) {
- MultiGetByOffsetData& data = node->multiGetByOffsetData();
- out.print(comma, "id", data.identifierNumber, "{", identifiers()[data.identifierNumber], "}");
- for (unsigned i = 0; i < data.cases.size(); ++i)
- out.print(comma, inContext(data.cases[i], context));
- }
- if (node->hasMultiPutByOffsetData()) {
- MultiPutByOffsetData& data = node->multiPutByOffsetData();
- out.print(comma, "id", data.identifierNumber, "{", identifiers()[data.identifierNumber], "}");
- for (unsigned i = 0; i < data.variants.size(); ++i)
- out.print(comma, inContext(data.variants[i], context));
- }
ASSERT(node->hasVariableAccessData(*this) == node->hasLocal(*this));
if (node->hasVariableAccessData(*this)) {
- VariableAccessData* variableAccessData = node->tryGetVariableAccessData();
- if (variableAccessData) {
- VirtualRegister operand = variableAccessData->local();
- out.print(comma, variableAccessData->local(), "(", VariableAccessDataDump(*this, variableAccessData), ")");
- operand = variableAccessData->machineLocal();
- if (operand.isValid())
- out.print(comma, "machine:", operand);
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ VirtualRegister operand = variableAccessData->local();
+ if (operand.isArgument())
+ out.print(comma, "arg", operand.toArgument(), "(", VariableAccessDataDump(*this, variableAccessData), ")");
+ else
+ out.print(comma, "loc", operand.toLocal(), "(", VariableAccessDataDump(*this, variableAccessData), ")");
+
+ operand = variableAccessData->machineLocal();
+ if (operand.isValid()) {
+ if (operand.isArgument())
+ out.print(comma, "machine:arg", operand.toArgument());
+ else
+ out.print(comma, "machine:loc", operand.toLocal());
}
}
- if (node->hasStackAccessData()) {
- StackAccessData* data = node->stackAccessData();
- out.print(comma, data->local);
- if (data->machineLocal.isValid())
- out.print(comma, "machine:", data->machineLocal);
- out.print(comma, data->format);
+ if (node->hasUnlinkedLocal()) {
+ VirtualRegister operand = node->unlinkedLocal();
+ if (operand.isArgument())
+ out.print(comma, "arg", operand.toArgument());
+ else
+ out.print(comma, "loc", operand.toLocal());
}
- if (node->hasUnlinkedLocal())
- out.print(comma, node->unlinkedLocal());
if (node->hasUnlinkedMachineLocal()) {
VirtualRegister operand = node->unlinkedMachineLocal();
- if (operand.isValid())
- out.print(comma, "machine:", operand);
+ if (operand.isValid()) {
+ if (operand.isArgument())
+ out.print(comma, "machine:arg", operand.toArgument());
+ else
+ out.print(comma, "machine:loc", operand.toLocal());
+ }
}
if (node->hasConstantBuffer()) {
out.print(comma);
out.print(node->startConstant(), ":[");
CommaPrinter anotherComma;
for (unsigned i = 0; i < node->numConstants(); ++i)
- out.print(anotherComma, pointerDumpInContext(freeze(m_codeBlock->constantBuffer(node->startConstant())[i]), context));
+ out.print(anotherComma, inContext(m_codeBlock->constantBuffer(node->startConstant())[i], context));
out.print("]");
}
if (node->hasIndexingType())
@@ -307,36 +292,29 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext*
out.print(comma, "^", node->phi()->index());
if (node->hasExecutionCounter())
out.print(comma, RawPointer(node->executionCounter()));
- if (node->hasWatchpointSet())
- out.print(comma, RawPointer(node->watchpointSet()));
+ if (node->hasVariableWatchpointSet())
+ out.print(comma, RawPointer(node->variableWatchpointSet()));
+ if (node->hasTypedArray())
+ out.print(comma, inContext(JSValue(node->typedArray()), context));
if (node->hasStoragePointer())
out.print(comma, RawPointer(node->storagePointer()));
- if (node->hasObjectMaterializationData())
- out.print(comma, node->objectMaterializationData());
- if (node->hasCallVarargsData())
- out.print(comma, "firstVarArgOffset = ", node->callVarargsData()->firstVarArgOffset);
- if (node->hasLoadVarargsData()) {
- LoadVarargsData* data = node->loadVarargsData();
- out.print(comma, "start = ", data->start, ", count = ", data->count);
- if (data->machineStart.isValid())
- out.print(", machineStart = ", data->machineStart);
- if (data->machineCount.isValid())
- out.print(", machineCount = ", data->machineCount);
- out.print(", offset = ", data->offset, ", mandatoryMinimum = ", data->mandatoryMinimum);
- out.print(", limit = ", data->limit);
- }
- if (node->isConstant())
- out.print(comma, pointerDumpInContext(node->constant(), context));
- if (node->isJump())
- out.print(comma, "T:", *node->targetBlock());
+ if (op == JSConstant) {
+ out.print(comma, "$", node->constantNumber());
+ JSValue value = valueOfJSConstant(node);
+ out.print(" = ", inContext(value, context));
+ }
+ if (op == WeakJSConstant)
+ out.print(comma, RawPointer(node->weakConstant()), " (", inContext(*node->weakConstant()->structure(), context), ")");
+ if (node->isBranch() || node->isJump())
+ out.print(comma, "T:", *node->takenBlock());
if (node->isBranch())
- out.print(comma, "T:", node->branchData()->taken, ", F:", node->branchData()->notTaken);
+ out.print(comma, "F:", *node->notTakenBlock());
if (node->isSwitch()) {
SwitchData* data = node->switchData();
out.print(comma, data->kind);
for (unsigned i = 0; i < data->cases.size(); ++i)
- out.print(comma, inContext(data->cases[i].value, context), ":", data->cases[i].target);
- out.print(comma, "default:", data->fallThrough);
+ out.print(comma, inContext(data->cases[i].value, context), ":", *data->cases[i].target);
+ out.print(comma, "default:", *data->fallThrough);
}
ClobberSet reads;
ClobberSet writes;
@@ -345,58 +323,43 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext*
out.print(comma, "R:", sortedListDump(reads.direct(), ","));
if (!writes.isEmpty())
out.print(comma, "W:", sortedListDump(writes.direct(), ","));
- if (node->origin.isSet()) {
- out.print(comma, "bc#", node->origin.semantic.bytecodeIndex);
- if (node->origin.semantic != node->origin.forExit)
- out.print(comma, "exit: ", node->origin.forExit);
- }
+ out.print(comma, "bc#", node->codeOrigin.bytecodeIndex);
out.print(")");
- if (node->hasVariableAccessData(*this) && node->tryGetVariableAccessData())
- out.print(" predicting ", SpeculationDump(node->tryGetVariableAccessData()->prediction()));
- else if (node->hasHeapPrediction())
- out.print(" predicting ", SpeculationDump(node->getHeapPrediction()));
+ if (!skipped) {
+ if (node->hasVariableAccessData(*this))
+ out.print(" predicting ", SpeculationDump(node->variableAccessData()->prediction()));
+ else if (node->hasHeapPrediction())
+ out.print(" predicting ", SpeculationDump(node->getHeapPrediction()));
+ }
out.print("\n");
}
-bool Graph::terminalsAreValid()
-{
- for (BasicBlock* block : blocksInNaturalOrder()) {
- if (!block->terminal())
- return false;
- }
- return true;
-}
-
void Graph::dumpBlockHeader(PrintStream& out, const char* prefix, BasicBlock* block, PhiNodeDumpMode phiNodeDumpMode, DumpContext* context)
{
- out.print(prefix, "Block ", *block, " (", inContext(block->at(0)->origin.semantic, context), "):", block->isReachable ? "" : " (skipped)", block->isOSRTarget ? " (OSR target)" : "", "\n");
- if (block->executionCount == block->executionCount)
- out.print(prefix, " Execution count: ", block->executionCount, "\n");
+ out.print(prefix, "Block ", *block, " (", inContext(block->at(0)->codeOrigin, context), "): ", block->isReachable ? "" : "(skipped)", block->isOSRTarget ? " (OSR target)" : "", "\n");
out.print(prefix, " Predecessors:");
for (size_t i = 0; i < block->predecessors.size(); ++i)
out.print(" ", *block->predecessors[i]);
out.print("\n");
- out.print(prefix, " Successors:");
- if (block->terminal()) {
- for (BasicBlock* successor : block->successors()) {
- out.print(" ", *successor);
- if (m_prePostNumbering.isValid())
- out.print(" (", m_prePostNumbering.edgeKind(block, successor), ")");
+ if (m_dominators.isValid()) {
+ out.print(prefix, " Dominated by:");
+ for (size_t i = 0; i < m_blocks.size(); ++i) {
+ if (!m_dominators.dominates(i, block->index))
+ continue;
+ out.print(" #", i);
}
- } else
- out.print(" <invalid>");
- out.print("\n");
- if (m_dominators.isValid() && terminalsAreValid()) {
- out.print(prefix, " Dominated by: ", m_dominators.dominatorsOf(block), "\n");
- out.print(prefix, " Dominates: ", m_dominators.blocksDominatedBy(block), "\n");
- out.print(prefix, " Dominance Frontier: ", m_dominators.dominanceFrontierOf(block), "\n");
- out.print(prefix, " Iterated Dominance Frontier: ", m_dominators.iteratedDominanceFrontierOf(BlockList(1, block)), "\n");
+ out.print("\n");
+ out.print(prefix, " Dominates:");
+ for (size_t i = 0; i < m_blocks.size(); ++i) {
+ if (!m_dominators.dominates(block->index, i))
+ continue;
+ out.print(" #", i);
+ }
+ out.print("\n");
}
- if (m_prePostNumbering.isValid())
- out.print(prefix, " Pre/Post Numbering: ", m_prePostNumbering.preNumber(block), "/", m_prePostNumbering.postNumber(block), "\n");
if (m_naturalLoops.isValid()) {
if (const NaturalLoop* loop = m_naturalLoops.headerOf(block)) {
out.print(prefix, " Loop header, contains:");
@@ -424,7 +387,7 @@ void Graph::dumpBlockHeader(PrintStream& out, const char* prefix, BasicBlock* bl
Node* phiNode = block->phis[i];
if (!phiNode->shouldGenerate() && phiNodeDumpMode == DumpLivePhisOnly)
continue;
- out.print(" @", phiNode->index(), "<", phiNode->local(), ",", phiNode->refCount(), ">->(");
+ out.print(" @", phiNode->index(), "<", phiNode->refCount(), ">->(");
if (phiNode->child1()) {
out.print("@", phiNode->child1()->index());
if (phiNode->child2()) {
@@ -446,14 +409,10 @@ void Graph::dump(PrintStream& out, DumpContext* context)
if (!context)
context = &myContext;
- out.print("\n");
- out.print("DFG for ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT), ":\n");
- out.print(" Fixpoint state: ", m_fixpointState, "; Form: ", m_form, "; Unification state: ", m_unificationState, "; Ref count state: ", m_refCountState, "\n");
- if (m_form == SSA)
- out.print(" Argument formats: ", listDump(m_argumentFormats), "\n");
- else
- out.print(" Arguments: ", listDump(m_arguments), "\n");
- out.print("\n");
+ dataLog("\n");
+ dataLog("DFG for ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT), ":\n");
+ dataLog(" Fixpoint state: ", m_fixpointState, "; Form: ", m_form, "; Unification state: ", m_unificationState, "; Ref count state: ", m_refCountState, "\n");
+ dataLog("\n");
Node* lastNode = 0;
for (size_t b = 0; b < m_blocks.size(); ++b) {
@@ -461,33 +420,22 @@ void Graph::dump(PrintStream& out, DumpContext* context)
if (!block)
continue;
dumpBlockHeader(out, "", block, DumpAllPhis, context);
- out.print(" States: ", block->cfaStructureClobberStateAtHead);
- if (!block->cfaHasVisited)
- out.print(", CurrentlyCFAUnreachable");
- if (!block->intersectionOfCFAHasVisited)
- out.print(", CFAUnreachable");
- out.print("\n");
switch (m_form) {
case LoadStore:
case ThreadedCPS: {
- out.print(" Vars Before: ");
+ out.print(" vars before: ");
if (block->cfaHasVisited)
out.print(inContext(block->valuesAtHead, context));
else
out.print("<empty>");
out.print("\n");
- out.print(" Intersected Vars Before: ");
- if (block->intersectionOfCFAHasVisited)
- out.print(inContext(block->intersectionOfPastValuesAtHead, context));
- else
- out.print("<empty>");
- out.print("\n");
- out.print(" Var Links: ", block->variablesAtHead, "\n");
+ out.print(" var links: ", block->variablesAtHead, "\n");
break;
}
case SSA: {
RELEASE_ASSERT(block->ssa);
+ out.print(" Flush format: ", block->ssa->flushAtHead, "\n");
out.print(" Availability: ", block->ssa->availabilityAtHead, "\n");
out.print(" Live: ", nodeListDump(block->ssa->liveAtHead), "\n");
out.print(" Values: ", nodeMapDump(block->ssa->valuesAtHead, context), "\n");
@@ -498,44 +446,33 @@ void Graph::dump(PrintStream& out, DumpContext* context)
dump(out, "", block->at(i), context);
lastNode = block->at(i);
}
- out.print(" States: ", block->cfaBranchDirection, ", ", block->cfaStructureClobberStateAtTail);
- if (!block->cfaDidFinish)
- out.print(", CFAInvalidated");
- out.print("\n");
switch (m_form) {
case LoadStore:
case ThreadedCPS: {
- out.print(" Vars After: ");
+ out.print(" vars after: ");
if (block->cfaHasVisited)
out.print(inContext(block->valuesAtTail, context));
else
out.print("<empty>");
out.print("\n");
- out.print(" Var Links: ", block->variablesAtTail, "\n");
+ out.print(" var links: ", block->variablesAtTail, "\n");
break;
}
case SSA: {
RELEASE_ASSERT(block->ssa);
+ out.print(" Flush format: ", block->ssa->flushAtTail, "\n");
out.print(" Availability: ", block->ssa->availabilityAtTail, "\n");
out.print(" Live: ", nodeListDump(block->ssa->liveAtTail), "\n");
out.print(" Values: ", nodeMapDump(block->ssa->valuesAtTail, context), "\n");
break;
} }
- out.print("\n");
+ dataLog("\n");
}
- out.print("GC Values:\n");
- for (FrozenValue* value : m_frozenValues) {
- if (value->pointsToHeap())
- out.print(" ", inContext(*value, &myContext), "\n");
- }
-
- out.print(inContext(watchpoints(), &myContext));
-
if (!myContext.isEmpty()) {
- myContext.dump(out);
- out.print("\n");
+ myContext.dump(WTF::dataFile());
+ dataLog("\n");
}
}
@@ -597,113 +534,6 @@ void Graph::resetReachability()
determineReachability();
}
-namespace {
-
-class RefCountCalculator {
-public:
- RefCountCalculator(Graph& graph)
- : m_graph(graph)
- {
- }
-
- void calculate()
- {
- // First reset the counts to 0 for all nodes.
- for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
- for (unsigned indexInBlock = block->size(); indexInBlock--;)
- block->at(indexInBlock)->setRefCount(0);
- for (unsigned phiIndex = block->phis.size(); phiIndex--;)
- block->phis[phiIndex]->setRefCount(0);
- }
-
- // Now find the roots:
- // - Nodes that are must-generate.
- // - Nodes that are reachable from type checks.
- // Set their ref counts to 1 and put them on the worklist.
- for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
- for (unsigned indexInBlock = block->size(); indexInBlock--;) {
- Node* node = block->at(indexInBlock);
- DFG_NODE_DO_TO_CHILDREN(m_graph, node, findTypeCheckRoot);
- if (!(node->flags() & NodeMustGenerate))
- continue;
- if (!node->postfixRef())
- m_worklist.append(node);
- }
- }
-
- while (!m_worklist.isEmpty()) {
- while (!m_worklist.isEmpty()) {
- Node* node = m_worklist.last();
- m_worklist.removeLast();
- ASSERT(node->shouldGenerate()); // It should not be on the worklist unless it's ref'ed.
- DFG_NODE_DO_TO_CHILDREN(m_graph, node, countEdge);
- }
-
- if (m_graph.m_form == SSA) {
- // Find Phi->Upsilon edges, which are represented as meta-data in the
- // Upsilon.
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
- for (unsigned nodeIndex = block->size(); nodeIndex--;) {
- Node* node = block->at(nodeIndex);
- if (node->op() != Upsilon)
- continue;
- if (node->shouldGenerate())
- continue;
- if (node->phi()->shouldGenerate())
- countNode(node);
- }
- }
- }
- }
- }
-
-private:
- void findTypeCheckRoot(Node*, Edge edge)
- {
- // We may have an "unproved" untyped use for code that is unreachable. The CFA
- // will just not have gotten around to it.
- if (edge.isProved() || edge.willNotHaveCheck())
- return;
- if (!edge->postfixRef())
- m_worklist.append(edge.node());
- }
-
- void countNode(Node* node)
- {
- if (node->postfixRef())
- return;
- m_worklist.append(node);
- }
-
- void countEdge(Node*, Edge edge)
- {
- // Don't count edges that are already counted for their type checks.
- if (!(edge.isProved() || edge.willNotHaveCheck()))
- return;
- countNode(edge.node());
- }
-
- Graph& m_graph;
- Vector<Node*, 128> m_worklist;
-};
-
-} // anonymous namespace
-
-void Graph::computeRefCounts()
-{
- RefCountCalculator calculator(*this);
- calculator.calculate();
-}
-
void Graph::killBlockAndItsContents(BasicBlock* block)
{
for (unsigned phiIndex = block->phis.size(); phiIndex--;)
@@ -727,15 +557,29 @@ void Graph::killUnreachableBlocks()
}
}
+void Graph::resetExitStates()
+{
+ for (BlockIndex blockIndex = 0; blockIndex < m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = block->size(); indexInBlock--;)
+ block->at(indexInBlock)->setCanExit(true);
+ }
+}
+
void Graph::invalidateCFG()
{
m_dominators.invalidate();
m_naturalLoops.invalidate();
- m_prePostNumbering.invalidate();
}
void Graph::substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal)
{
+ if (variableAccessData->isCaptured()) {
+ // Let CSE worry about this one.
+ return;
+ }
for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
Node* node = block[indexInBlock];
bool shouldContinue = true;
@@ -765,37 +609,26 @@ void Graph::substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, Va
}
}
-BlockList Graph::blocksInPreOrder()
+void Graph::addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock* block)
{
- BlockList result;
- BlockWorklist worklist;
- worklist.push(block(0));
- while (BasicBlock* block = worklist.pop()) {
- result.append(block);
- for (unsigned i = block->numSuccessors(); i--;)
- worklist.push(block->successor(i));
- }
- return result;
+ if (seen.contains(block))
+ return;
+
+ result.append(block);
+ worklist.append(block);
+ seen.add(block);
}
-BlockList Graph::blocksInPostOrder()
+void Graph::getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result)
{
- BlockList result;
- PostOrderBlockWorklist worklist;
- worklist.push(block(0));
- while (BlockWithOrder item = worklist.pop()) {
- switch (item.order) {
- case PreOrder:
- worklist.pushPost(item.block);
- for (unsigned i = item.block->numSuccessors(); i--;)
- worklist.push(item.block->successor(i));
- break;
- case PostOrder:
- result.append(item.block);
- break;
- }
+ Vector<BasicBlock*, 16> worklist;
+ HashSet<BasicBlock*> seen;
+ addForDepthFirstSort(result, worklist, seen, block(0));
+ while (!worklist.isEmpty()) {
+ BasicBlock* block = worklist.takeLast();
+ for (unsigned i = block->numSuccessors(); i--;)
+ addForDepthFirstSort(result, worklist, seen, block->successor(i));
}
- return result;
}
void Graph::clearReplacements()
@@ -805,22 +638,9 @@ void Graph::clearReplacements()
if (!block)
continue;
for (unsigned phiIndex = block->phis.size(); phiIndex--;)
- block->phis[phiIndex]->setReplacement(nullptr);
- for (unsigned nodeIndex = block->size(); nodeIndex--;)
- block->at(nodeIndex)->setReplacement(nullptr);
- }
-}
-
-void Graph::clearEpochs()
-{
- for (BlockIndex blockIndex = numBlocks(); blockIndex--;) {
- BasicBlock* block = m_blocks[blockIndex].get();
- if (!block)
- continue;
- for (unsigned phiIndex = block->phis.size(); phiIndex--;)
- block->phis[phiIndex]->setEpoch(Epoch());
+ block->phis[phiIndex]->misc.replacement = 0;
for (unsigned nodeIndex = block->size(); nodeIndex--;)
- block->at(nodeIndex)->setEpoch(Epoch());
+ block->at(nodeIndex)->misc.replacement = 0;
}
}
@@ -831,49 +651,12 @@ void Graph::initializeNodeOwners()
if (!block)
continue;
for (unsigned phiIndex = block->phis.size(); phiIndex--;)
- block->phis[phiIndex]->owner = block;
- for (unsigned nodeIndex = block->size(); nodeIndex--;)
- block->at(nodeIndex)->owner = block;
- }
-}
-
-void Graph::clearFlagsOnAllNodes(NodeFlags flags)
-{
- for (BlockIndex blockIndex = numBlocks(); blockIndex--;) {
- BasicBlock* block = m_blocks[blockIndex].get();
- if (!block)
- continue;
- for (unsigned phiIndex = block->phis.size(); phiIndex--;)
- block->phis[phiIndex]->clearFlags(flags);
+ block->phis[phiIndex]->misc.owner = block;
for (unsigned nodeIndex = block->size(); nodeIndex--;)
- block->at(nodeIndex)->clearFlags(flags);
+ block->at(nodeIndex)->misc.owner = block;
}
}
-bool Graph::watchCondition(const ObjectPropertyCondition& key)
-{
- if (!key.isWatchable())
- return false;
-
- m_plan.weakReferences.addLazily(key.object());
- if (key.hasPrototype())
- m_plan.weakReferences.addLazily(key.prototype());
- if (key.hasRequiredValue())
- m_plan.weakReferences.addLazily(key.requiredValue());
-
- m_plan.watchpoints.addLazily(key);
-
- if (key.kind() == PropertyCondition::Presence)
- m_safeToLoad.add(std::make_pair(key.object(), key.offset()));
-
- return true;
-}
-
-bool Graph::isSafeToLoad(JSObject* base, PropertyOffset offset)
-{
- return m_safeToLoad.contains(std::make_pair(base, offset));
-}
-
FullBytecodeLiveness& Graph::livenessFor(CodeBlock* codeBlock)
{
HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>>::iterator iter = m_bytecodeLiveness.find(codeBlock);
@@ -883,7 +666,7 @@ FullBytecodeLiveness& Graph::livenessFor(CodeBlock* codeBlock)
std::unique_ptr<FullBytecodeLiveness> liveness = std::make_unique<FullBytecodeLiveness>();
codeBlock->livenessAnalysis().computeFullLiveness(*liveness);
FullBytecodeLiveness& result = *liveness;
- m_bytecodeLiveness.add(codeBlock, WTF::move(liveness));
+ m_bytecodeLiveness.add(codeBlock, std::move(liveness));
return result;
}
@@ -892,40 +675,22 @@ FullBytecodeLiveness& Graph::livenessFor(InlineCallFrame* inlineCallFrame)
return livenessFor(baselineCodeBlockFor(inlineCallFrame));
}
-BytecodeKills& Graph::killsFor(CodeBlock* codeBlock)
-{
- HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>>::iterator iter = m_bytecodeKills.find(codeBlock);
- if (iter != m_bytecodeKills.end())
- return *iter->value;
-
- std::unique_ptr<BytecodeKills> kills = std::make_unique<BytecodeKills>();
- codeBlock->livenessAnalysis().computeKills(*kills);
- BytecodeKills& result = *kills;
- m_bytecodeKills.add(codeBlock, WTF::move(kills));
- return result;
-}
-
-BytecodeKills& Graph::killsFor(InlineCallFrame* inlineCallFrame)
-{
- return killsFor(baselineCodeBlockFor(inlineCallFrame));
-}
-
bool Graph::isLiveInBytecode(VirtualRegister operand, CodeOrigin codeOrigin)
{
for (;;) {
- VirtualRegister reg = VirtualRegister(
- operand.offset() - codeOrigin.stackOffset());
-
if (operand.offset() < codeOrigin.stackOffset() + JSStack::CallFrameHeaderSize) {
+ VirtualRegister reg = VirtualRegister(
+ operand.offset() - codeOrigin.stackOffset());
+
if (reg.isArgument()) {
RELEASE_ASSERT(reg.offset() < JSStack::CallFrameHeaderSize);
- if (codeOrigin.inlineCallFrame->isClosureCall
- && reg.offset() == JSStack::Callee)
- return true;
+ if (!codeOrigin.inlineCallFrame->isClosureCall)
+ return false;
- if (codeOrigin.inlineCallFrame->isVarargs()
- && reg.offset() == JSStack::ArgumentCount)
+ if (reg.offset() == JSStack::Callee)
+ return true;
+ if (reg.offset() == JSStack::ScopeChain)
return true;
return false;
@@ -935,50 +700,24 @@ bool Graph::isLiveInBytecode(VirtualRegister operand, CodeOrigin codeOrigin)
reg.offset(), codeOrigin.bytecodeIndex);
}
- InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
- if (!inlineCallFrame)
+ if (!codeOrigin.inlineCallFrame)
break;
-
- // Arguments are always live. This would be redundant if it wasn't for our
- // op_call_varargs inlining.
- if (reg.isArgument()
- && static_cast<size_t>(reg.toArgument()) < inlineCallFrame->arguments.size())
- return true;
- codeOrigin = inlineCallFrame->caller;
+ codeOrigin = codeOrigin.inlineCallFrame->caller;
}
return true;
}
-BitVector Graph::localsLiveInBytecode(CodeOrigin codeOrigin)
-{
- BitVector result;
- result.ensureSize(block(0)->variablesAtHead.numberOfLocals());
- forAllLocalsLiveInBytecode(
- codeOrigin,
- [&] (VirtualRegister reg) {
- ASSERT(reg.isLocal());
- result.quickSet(reg.toLocal());
- });
- return result;
-}
-
unsigned Graph::frameRegisterCount()
{
- unsigned result = m_nextMachineLocal + std::max(m_parameterSlots, static_cast<unsigned>(maxFrameExtentForSlowPathCallInRegisters));
- return roundLocalRegisterCountForFramePointerOffset(result);
-}
-
-unsigned Graph::stackPointerOffset()
-{
- return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
+ return m_nextMachineLocal + m_parameterSlots;
}
unsigned Graph::requiredRegisterCountForExit()
{
unsigned count = JIT::frameRegisterCountFor(m_profiledBlock);
- for (InlineCallFrameSet::iterator iter = m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
+ for (InlineCallFrameSet::iterator iter = m_inlineCallFrames->begin(); !!iter; ++iter) {
InlineCallFrame* inlineCallFrame = *iter;
CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
unsigned requiredCount = VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + JIT::frameRegisterCountFor(codeBlock);
@@ -992,384 +731,47 @@ unsigned Graph::requiredRegisterCountForExecutionAndExit()
return std::max(frameRegisterCount(), requiredRegisterCountForExit());
}
-JSValue Graph::tryGetConstantProperty(
- JSValue base, const StructureSet& structureSet, PropertyOffset offset)
-{
- if (!base || !base.isObject())
- return JSValue();
-
- JSObject* object = asObject(base);
-
- for (unsigned i = structureSet.size(); i--;) {
- Structure* structure = structureSet[i];
- assertIsRegistered(structure);
-
- WatchpointSet* set = structure->propertyReplacementWatchpointSet(offset);
- if (!set || !set->isStillValid())
- return JSValue();
-
- ASSERT(structure->isValidOffset(offset));
- ASSERT(!structure->isUncacheableDictionary());
-
- watchpoints().addLazily(set);
- }
-
- // What follows may require some extra thought. We need this load to load a valid JSValue. If
- // our profiling makes sense and we're still on track to generate code that won't be
- // invalidated, then we have nothing to worry about. We do, however, have to worry about
- // loading - and then using - an invalid JSValue in the case that unbeknownst to us our code
- // is doomed.
- //
- // One argument in favor of this code is that it should definitely work because the butterfly
- // is always set before the structure. However, we don't currently have a fence between those
- // stores. It's not clear if this matters, however. We don't ever shrink the property storage.
- // So, for this to fail, you'd need an access on a constant object pointer such that the inline
- // caches told us that the object had a structure that it did not *yet* have, and then later,
- // the object transitioned to that structure that the inline caches had alraedy seen. And then
- // the processor reordered the stores. Seems unlikely and difficult to test. I believe that
- // this is worth revisiting but it isn't worth losing sleep over. Filed:
- // https://bugs.webkit.org/show_bug.cgi?id=134641
- //
- // For now, we just do the minimal thing: defend against the structure right now being
- // incompatible with the getDirect we're trying to do. The easiest way to do that is to
- // determine if the structure belongs to the proven set.
-
- if (!structureSet.contains(object->structure()))
- return JSValue();
-
- return object->getDirect(offset);
-}
-
-JSValue Graph::tryGetConstantProperty(JSValue base, Structure* structure, PropertyOffset offset)
-{
- return tryGetConstantProperty(base, StructureSet(structure), offset);
-}
-
-JSValue Graph::tryGetConstantProperty(
- JSValue base, const StructureAbstractValue& structure, PropertyOffset offset)
-{
- if (structure.isTop() || structure.isClobbered()) {
- // FIXME: If we just converted the offset to a uid, we could do ObjectPropertyCondition
- // watching to constant-fold the property.
- // https://bugs.webkit.org/show_bug.cgi?id=147271
- return JSValue();
- }
-
- return tryGetConstantProperty(base, structure.set(), offset);
-}
-
-JSValue Graph::tryGetConstantProperty(const AbstractValue& base, PropertyOffset offset)
+JSActivation* Graph::tryGetActivation(Node* node)
{
- return tryGetConstantProperty(base.m_value, base.m_structure, offset);
+ if (!node->hasConstant())
+ return 0;
+ return jsDynamicCast<JSActivation*>(valueOfJSConstant(node));
}
-JSValue Graph::tryGetConstantClosureVar(JSValue base, ScopeOffset offset)
+WriteBarrierBase<Unknown>* Graph::tryGetRegisters(Node* node)
{
- // This has an awesome concurrency story. See comment for GetGlobalVar in ByteCodeParser.
-
- if (!base)
- return JSValue();
-
- JSLexicalEnvironment* activation = jsDynamicCast<JSLexicalEnvironment*>(base);
+ JSActivation* activation = tryGetActivation(node);
if (!activation)
- return JSValue();
-
- SymbolTable* symbolTable = activation->symbolTable();
- JSValue value;
- WatchpointSet* set;
- {
- ConcurrentJITLocker locker(symbolTable->m_lock);
-
- SymbolTableEntry* entry = symbolTable->entryFor(locker, offset);
- if (!entry)
- return JSValue();
-
- set = entry->watchpointSet();
- if (!set)
- return JSValue();
-
- if (set->state() != IsWatched)
- return JSValue();
-
- ASSERT(entry->scopeOffset() == offset);
- value = activation->variableAt(offset).get();
- if (!value)
- return JSValue();
- }
-
- watchpoints().addLazily(set);
-
- return value;
-}
-
-JSValue Graph::tryGetConstantClosureVar(const AbstractValue& value, ScopeOffset offset)
-{
- return tryGetConstantClosureVar(value.m_value, offset);
+ return 0;
+ if (!activation->isTornOff())
+ return 0;
+ return activation->registers();
}
-JSValue Graph::tryGetConstantClosureVar(Node* node, ScopeOffset offset)
+JSArrayBufferView* Graph::tryGetFoldableView(Node* node)
{
if (!node->hasConstant())
- return JSValue();
- return tryGetConstantClosureVar(node->asJSValue(), offset);
-}
-
-JSArrayBufferView* Graph::tryGetFoldableView(JSValue value)
-{
- if (!value)
- return nullptr;
- JSArrayBufferView* view = jsDynamicCast<JSArrayBufferView*>(value);
- if (!value)
- return nullptr;
- if (!view->length())
- return nullptr;
- WTF::loadLoadFence();
- watchpoints().addLazily(view);
+ return 0;
+ JSArrayBufferView* view = jsDynamicCast<JSArrayBufferView*>(valueOfJSConstant(node));
+ if (!view)
+ return 0;
+ if (!watchpoints().isStillValid(view))
+ return 0;
return view;
}
-JSArrayBufferView* Graph::tryGetFoldableView(JSValue value, ArrayMode arrayMode)
+JSArrayBufferView* Graph::tryGetFoldableView(Node* node, ArrayMode arrayMode)
{
if (arrayMode.typedArrayType() == NotTypedArray)
- return nullptr;
- return tryGetFoldableView(value);
-}
-
-void Graph::registerFrozenValues()
-{
- m_codeBlock->constants().resize(0);
- m_codeBlock->constantsSourceCodeRepresentation().resize(0);
- for (FrozenValue* value : m_frozenValues) {
- if (!value->pointsToHeap())
- continue;
-
- ASSERT(value->structure());
- ASSERT(m_plan.weakReferences.contains(value->structure()));
-
- switch (value->strength()) {
- case WeakValue: {
- m_plan.weakReferences.addLazily(value->value().asCell());
- break;
- }
- case StrongValue: {
- unsigned constantIndex = m_codeBlock->addConstantLazily();
- // We already have a barrier on the code block.
- m_codeBlock->constants()[constantIndex].setWithoutWriteBarrier(value->value());
- break;
- } }
- }
- m_codeBlock->constants().shrinkToFit();
- m_codeBlock->constantsSourceCodeRepresentation().shrinkToFit();
-}
-
-void Graph::visitChildren(SlotVisitor& visitor)
-{
- for (FrozenValue* value : m_frozenValues) {
- visitor.appendUnbarrieredReadOnlyValue(value->value());
- visitor.appendUnbarrieredReadOnlyPointer(value->structure());
- }
-
- for (BlockIndex blockIndex = numBlocks(); blockIndex--;) {
- BasicBlock* block = this->block(blockIndex);
- if (!block)
- continue;
-
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
-
- switch (node->op()) {
- case CheckStructure:
- for (unsigned i = node->structureSet().size(); i--;)
- visitor.appendUnbarrieredReadOnlyPointer(node->structureSet()[i]);
- break;
-
- case NewObject:
- case ArrayifyToStructure:
- case NewStringObject:
- visitor.appendUnbarrieredReadOnlyPointer(node->structure());
- break;
-
- case PutStructure:
- case AllocatePropertyStorage:
- case ReallocatePropertyStorage:
- visitor.appendUnbarrieredReadOnlyPointer(
- node->transition()->previous);
- visitor.appendUnbarrieredReadOnlyPointer(
- node->transition()->next);
- break;
-
- case MultiGetByOffset:
- for (const MultiGetByOffsetCase& getCase : node->multiGetByOffsetData().cases) {
- for (Structure* structure : getCase.set())
- visitor.appendUnbarrieredReadOnlyPointer(structure);
- }
- break;
-
- case MultiPutByOffset:
- for (unsigned i = node->multiPutByOffsetData().variants.size(); i--;) {
- PutByIdVariant& variant = node->multiPutByOffsetData().variants[i];
- const StructureSet& set = variant.oldStructure();
- for (unsigned j = set.size(); j--;)
- visitor.appendUnbarrieredReadOnlyPointer(set[j]);
- if (variant.kind() == PutByIdVariant::Transition)
- visitor.appendUnbarrieredReadOnlyPointer(variant.newStructure());
- }
- break;
-
- default:
- break;
- }
- }
- }
-}
-
-FrozenValue* Graph::freeze(JSValue value)
-{
- if (UNLIKELY(!value))
- return FrozenValue::emptySingleton();
-
- auto result = m_frozenValueMap.add(JSValue::encode(value), nullptr);
- if (LIKELY(!result.isNewEntry))
- return result.iterator->value;
-
- if (value.isUInt32())
- m_uint32ValuesInUse.append(value.asUInt32());
-
- FrozenValue frozenValue = FrozenValue::freeze(value);
- if (Structure* structure = frozenValue.structure())
- registerStructure(structure);
-
- return result.iterator->value = m_frozenValues.add(frozenValue);
-}
-
-FrozenValue* Graph::freezeStrong(JSValue value)
-{
- FrozenValue* result = freeze(value);
- result->strengthenTo(StrongValue);
- return result;
-}
-
-void Graph::convertToConstant(Node* node, FrozenValue* value)
-{
- if (value->structure())
- assertIsRegistered(value->structure());
- node->convertToConstant(value);
-}
-
-void Graph::convertToConstant(Node* node, JSValue value)
-{
- convertToConstant(node, freeze(value));
-}
-
-void Graph::convertToStrongConstant(Node* node, JSValue value)
-{
- convertToConstant(node, freezeStrong(value));
-}
-
-StructureRegistrationResult Graph::registerStructure(Structure* structure)
-{
- m_plan.weakReferences.addLazily(structure);
- if (m_plan.watchpoints.consider(structure))
- return StructureRegisteredAndWatched;
- return StructureRegisteredNormally;
-}
-
-void Graph::assertIsRegistered(Structure* structure)
-{
- // It's convenient to be able to call this with a maybe-null structure.
- if (!structure)
- return;
-
- DFG_ASSERT(*this, nullptr, m_plan.weakReferences.contains(structure));
-
- if (!structure->dfgShouldWatch())
- return;
- if (watchpoints().isWatched(structure->transitionWatchpointSet()))
- return;
-
- DFG_CRASH(*this, nullptr, toCString("Structure ", pointerDump(structure), " is watchable but isn't being watched.").data());
-}
-
-NO_RETURN_DUE_TO_CRASH static void crash(
- Graph& graph, const CString& whileText, const char* file, int line, const char* function,
- const char* assertion)
-{
- startCrashing();
- dataLog("DFG ASSERTION FAILED: ", assertion, "\n");
- dataLog(file, "(", line, ") : ", function, "\n");
- dataLog("\n");
- dataLog(whileText);
- dataLog("Graph at time of failure:\n");
- graph.dump();
- dataLog("\n");
- dataLog("DFG ASSERTION FAILED: ", assertion, "\n");
- dataLog(file, "(", line, ") : ", function, "\n");
- CRASH_WITH_SECURITY_IMPLICATION();
-}
-
-void Graph::handleAssertionFailure(
- std::nullptr_t, const char* file, int line, const char* function, const char* assertion)
-{
- crash(*this, "", file, line, function, assertion);
-}
-
-void Graph::handleAssertionFailure(
- Node* node, const char* file, int line, const char* function, const char* assertion)
-{
- crash(*this, toCString("While handling node ", node, "\n\n"), file, line, function, assertion);
-}
-
-void Graph::handleAssertionFailure(
- BasicBlock* block, const char* file, int line, const char* function, const char* assertion)
-{
- crash(*this, toCString("While handling block ", pointerDump(block), "\n\n"), file, line, function, assertion);
-}
-
-ValueProfile* Graph::valueProfileFor(Node* node)
-{
- if (!node)
- return nullptr;
-
- CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
-
- if (node->hasLocal(*this)) {
- if (!node->local().isArgument())
- return nullptr;
- int argument = node->local().toArgument();
- Node* argumentNode = m_arguments[argument];
- if (!argumentNode)
- return nullptr;
- if (node->variableAccessData() != argumentNode->variableAccessData())
- return nullptr;
- return profiledBlock->valueProfileForArgument(argument);
- }
-
- if (node->hasHeapPrediction())
- return profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
-
- return nullptr;
+ return 0;
+ return tryGetFoldableView(node);
}
-MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* node)
+JSArrayBufferView* Graph::tryGetFoldableViewForChild1(Node* node)
{
- if (!node)
- return MethodOfGettingAValueProfile();
-
- if (ValueProfile* valueProfile = valueProfileFor(node))
- return MethodOfGettingAValueProfile(valueProfile);
-
- if (node->op() == GetLocal) {
- CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
-
- return MethodOfGettingAValueProfile::fromLazyOperand(
- profiledBlock,
- LazyOperandValueProfileKey(
- node->origin.semantic.bytecodeIndex, node->local()));
- }
-
- return MethodOfGettingAValueProfile();
+ return tryGetFoldableView(child(node, 0).node(), node->arrayMode());
}
} } // namespace JSC::DFG
-#endif // ENABLE(DFG_JIT)
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index e9926db46..7a5170048 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,26 +26,24 @@
#ifndef DFGGraph_h
#define DFGGraph_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "AssemblyHelpers.h"
-#include "BytecodeLivenessAnalysisInlines.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
#include "DFGDominators.h"
-#include "DFGFrozenValue.h"
#include "DFGLongLivedState.h"
#include "DFGNaturalLoops.h"
#include "DFGNode.h"
#include "DFGNodeAllocator.h"
#include "DFGPlan.h"
-#include "DFGPrePostNumbering.h"
-#include "DFGScannable.h"
-#include "FullBytecodeLiveness.h"
+#include "DFGVariadicFunction.h"
+#include "InlineCallFrameSet.h"
#include "JSStack.h"
#include "MethodOfGettingAValueProfile.h"
-#include <unordered_map>
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
@@ -58,47 +56,10 @@ class ExecState;
namespace DFG {
-#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
- Node* _node = (node); \
- if (_node->flags() & NodeHasVarArgs) { \
- for (unsigned _childIdx = _node->firstChild(); \
- _childIdx < _node->firstChild() + _node->numChildren(); \
- _childIdx++) { \
- if (!!(graph).m_varArgChildren[_childIdx]) \
- thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
- } \
- } else { \
- if (!_node->child1()) { \
- ASSERT( \
- !_node->child2() \
- && !_node->child3()); \
- break; \
- } \
- thingToDo(_node, _node->child1()); \
- \
- if (!_node->child2()) { \
- ASSERT(!_node->child3()); \
- break; \
- } \
- thingToDo(_node, _node->child2()); \
- \
- if (!_node->child3()) \
- break; \
- thingToDo(_node, _node->child3()); \
- } \
- } while (false)
-
-#define DFG_ASSERT(graph, node, assertion) do { \
- if (!!(assertion)) \
- break; \
- (graph).handleAssertionFailure( \
- (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
- } while (false)
-
-#define DFG_CRASH(graph, node, reason) do { \
- (graph).handleAssertionFailure( \
- (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \
- } while (false)
+struct StorageAccessData {
+ PropertyOffset offset;
+ unsigned identifierNumber;
+};
struct InlineVariableData {
InlineCallFrame* inlineCallFrame;
@@ -117,7 +78,7 @@ enum AddSpeculationMode {
//
// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
-class Graph : public virtual Scannable {
+class Graph {
public:
Graph(VM&, Plan&, LongLivedState&);
~Graph();
@@ -165,7 +126,7 @@ public:
return;
// Check if there is any replacement.
- Node* replacement = child->replacement();
+ Node* replacement = child->misc.replacement;
if (!replacement)
return;
@@ -173,36 +134,55 @@ public:
// There is definitely a replacement. Assert that the replacement does not
// have a replacement.
- ASSERT(!child->replacement());
+ ASSERT(!child->misc.replacement);
}
- template<typename... Params>
- Node* addNode(SpeculatedType type, Params... params)
- {
- Node* node = new (m_allocator) Node(params...);
- node->predict(type);
- return node;
+#define DFG_DEFINE_ADD_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
+ templatePre typeParams templatePost Node* addNode(SpeculatedType type valueParamsComma valueParams) \
+ { \
+ Node* node = new (m_allocator) Node(valueArgs); \
+ node->predict(type); \
+ return node; \
}
+ DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_ADD_NODE)
+#undef DFG_DEFINE_ADD_NODE
void dethread();
- FrozenValue* freeze(JSValue); // We use weak freezing by default.
- FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue).
-
- void convertToConstant(Node* node, FrozenValue* value);
- void convertToConstant(Node* node, JSValue value);
- void convertToStrongConstant(Node* node, JSValue value);
-
- StructureRegistrationResult registerStructure(Structure* structure);
- void assertIsRegistered(Structure* structure);
+ void convertToConstant(Node* node, unsigned constantNumber)
+ {
+ if (node->op() == GetLocal)
+ dethread();
+ else
+ ASSERT(!node->hasVariableAccessData(*this));
+ node->convertToConstant(constantNumber);
+ }
+
+ unsigned constantRegisterForConstant(JSValue value)
+ {
+ unsigned constantRegister;
+ if (!m_codeBlock->findConstant(value, constantRegister)) {
+ constantRegister = m_codeBlock->addConstantLazily();
+ initializeLazyWriteBarrierForConstant(
+ m_plan.writeBarriers,
+ m_codeBlock->constants()[constantRegister],
+ m_codeBlock,
+ constantRegister,
+ m_codeBlock->ownerExecutable(),
+ value);
+ }
+ return constantRegister;
+ }
+ void convertToConstant(Node* node, JSValue value)
+ {
+ convertToConstant(node, constantRegisterForConstant(value));
+ }
+
// CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
-
- bool terminalsAreValid();
-
enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
- void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
+ void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext* context);
void dump(PrintStream&, Edge);
void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
static int amountOfNodeWhiteSpace(Node*);
@@ -210,54 +190,49 @@ public:
// Dump the code origin of the given node as a diff from the code origin of the
// preceding node. Returns true if anything was printed.
- bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext*);
+ bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext* context);
- AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
+ SpeculatedType getJSConstantSpeculation(Node* node)
+ {
+ return speculationFromValue(node->valueOfJSConstant(m_codeBlock));
+ }
+
+ AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32)
{
ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);
- RareCaseProfilingSource source = add->sourceFor(pass);
-
Node* left = add->child1().node();
Node* right = add->child2().node();
if (left->hasConstant())
- return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source);
+ return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, left);
if (right->hasConstant())
- return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source);
+ return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, right);
- return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
+ return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32()) ? SpeculateInt32 : DontSpeculateInt32;
}
- AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
+ AddSpeculationMode valueAddSpeculationMode(Node* add)
{
- return addSpeculationMode(
- add,
- add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
- add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
- pass);
+ return addSpeculationMode(add, add->child1()->shouldSpeculateInt32ExpectingDefined(), add->child2()->shouldSpeculateInt32ExpectingDefined());
}
- AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
+ AddSpeculationMode arithAddSpeculationMode(Node* add)
{
- return addSpeculationMode(
- add,
- add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
- add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
- pass);
+ return addSpeculationMode(add, add->child1()->shouldSpeculateInt32ForArithmetic(), add->child2()->shouldSpeculateInt32ForArithmetic());
}
- AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
+ AddSpeculationMode addSpeculationMode(Node* add)
{
if (add->op() == ValueAdd)
- return valueAddSpeculationMode(add, pass);
+ return valueAddSpeculationMode(add);
- return arithAddSpeculationMode(add, pass);
+ return arithAddSpeculationMode(add);
}
- bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
+ bool addShouldSpeculateInt32(Node* add)
{
- return addSpeculationMode(add, pass) != DontSpeculateInt32;
+ return addSpeculationMode(add) != DontSpeculateInt32;
}
bool addShouldSpeculateMachineInt(Node* add)
@@ -268,22 +243,27 @@ public:
Node* left = add->child1().node();
Node* right = add->child2().node();
- bool speculation = Node::shouldSpeculateMachineInt(left, right);
+ bool speculation;
+ if (add->op() == ValueAdd)
+ speculation = Node::shouldSpeculateMachineInt(left, right);
+ else
+ speculation = Node::shouldSpeculateMachineInt(left, right);
+
return speculation && !hasExitSite(add, Int52Overflow);
}
- bool mulShouldSpeculateInt32(Node* mul, PredictionPass pass)
+ bool mulShouldSpeculateInt32(Node* mul)
{
ASSERT(mul->op() == ArithMul);
Node* left = mul->child1().node();
Node* right = mul->child2().node();
- return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
- && mul->canSpeculateInt32(mul->sourceFor(pass));
+ return Node::shouldSpeculateInt32ForArithmetic(left, right)
+ && mul->canSpeculateInt32();
}
- bool mulShouldSpeculateMachineInt(Node* mul, PredictionPass pass)
+ bool mulShouldSpeculateMachineInt(Node* mul)
{
ASSERT(mul->op() == ArithMul);
@@ -294,43 +274,124 @@ public:
Node* right = mul->child2().node();
return Node::shouldSpeculateMachineInt(left, right)
- && mul->canSpeculateInt52(pass)
+ && mul->canSpeculateInt52()
&& !hasExitSite(mul, Int52Overflow);
}
- bool negateShouldSpeculateInt32(Node* negate, PredictionPass pass)
+ bool negateShouldSpeculateInt32(Node* negate)
{
ASSERT(negate->op() == ArithNegate);
- return negate->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
- && negate->canSpeculateInt32(pass);
+ return negate->child1()->shouldSpeculateInt32ForArithmetic() && negate->canSpeculateInt32();
}
- bool negateShouldSpeculateMachineInt(Node* negate, PredictionPass pass)
+ bool negateShouldSpeculateMachineInt(Node* negate)
{
ASSERT(negate->op() == ArithNegate);
if (!enableInt52())
return false;
return negate->child1()->shouldSpeculateMachineInt()
&& !hasExitSite(negate, Int52Overflow)
- && negate->canSpeculateInt52(pass);
+ && negate->canSpeculateInt52();
}
-
- bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass)
+
+ VirtualRegister bytecodeRegisterForArgument(CodeOrigin codeOrigin, int argument)
{
- ASSERT(arithRound->op() == ArithRound);
- return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero);
+ return VirtualRegister(
+ codeOrigin.inlineCallFrame->stackOffset +
+ baselineCodeBlockFor(codeOrigin)->argumentIndexAfterCapture(argument));
}
+ // Helper methods to check nodes for constants.
+ bool isConstant(Node* node)
+ {
+ return node->hasConstant();
+ }
+ bool isJSConstant(Node* node)
+ {
+ return node->hasConstant();
+ }
+ bool isInt32Constant(Node* node)
+ {
+ return node->isInt32Constant(m_codeBlock);
+ }
+ bool isDoubleConstant(Node* node)
+ {
+ return node->isDoubleConstant(m_codeBlock);
+ }
+ bool isNumberConstant(Node* node)
+ {
+ return node->isNumberConstant(m_codeBlock);
+ }
+ bool isBooleanConstant(Node* node)
+ {
+ return node->isBooleanConstant(m_codeBlock);
+ }
+ bool isCellConstant(Node* node)
+ {
+ if (!isJSConstant(node))
+ return false;
+ JSValue value = valueOfJSConstant(node);
+ return value.isCell() && !!value;
+ }
+ bool isFunctionConstant(Node* node)
+ {
+ if (!isJSConstant(node))
+ return false;
+ if (!getJSFunction(valueOfJSConstant(node)))
+ return false;
+ return true;
+ }
+ bool isInternalFunctionConstant(Node* node)
+ {
+ if (!isJSConstant(node))
+ return false;
+ JSValue value = valueOfJSConstant(node);
+ if (!value.isCell() || !value)
+ return false;
+ JSCell* cell = value.asCell();
+ if (!cell->inherits(InternalFunction::info()))
+ return false;
+ return true;
+ }
+ // Helper methods get constant values from nodes.
+ JSValue valueOfJSConstant(Node* node)
+ {
+ return node->valueOfJSConstant(m_codeBlock);
+ }
+ int32_t valueOfInt32Constant(Node* node)
+ {
+ return valueOfJSConstant(node).asInt32();
+ }
+ double valueOfNumberConstant(Node* node)
+ {
+ return valueOfJSConstant(node).asNumber();
+ }
+ bool valueOfBooleanConstant(Node* node)
+ {
+ return valueOfJSConstant(node).asBoolean();
+ }
+ JSFunction* valueOfFunctionConstant(Node* node)
+ {
+ JSCell* function = getJSFunction(valueOfJSConstant(node));
+ ASSERT(function);
+ return jsCast<JSFunction*>(function);
+ }
+
static const char *opName(NodeType);
StructureSet* addStructureSet(const StructureSet& structureSet)
{
- for (Structure* structure : structureSet)
- registerStructure(structure);
+ ASSERT(structureSet.size());
m_structureSet.append(structureSet);
return &m_structureSet.last();
}
+ StructureTransitionData* addStructureTransitionData(const StructureTransitionData& structureTransitionData)
+ {
+ m_structureTransitionData.append(structureTransitionData);
+ return &m_structureTransitionData.last();
+ }
+
JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
{
return m_codeBlock->globalObjectFor(codeOrigin);
@@ -381,7 +442,8 @@ public:
bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
{
- return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
+ return m_plan.watchpoints.isStillValid(
+ globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint());
}
bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
@@ -396,7 +458,53 @@ public:
bool hasExitSite(Node* node, ExitKind exitKind)
{
- return hasExitSite(node->origin.semantic, exitKind);
+ return hasExitSite(node->codeOrigin, exitKind);
+ }
+
+ VirtualRegister argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock->argumentsRegister();
+
+ return VirtualRegister(baselineCodeBlockForInlineCallFrame(
+ inlineCallFrame)->argumentsRegister().offset() +
+ inlineCallFrame->stackOffset);
+ }
+
+ VirtualRegister argumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return argumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ VirtualRegister machineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_codeBlock->argumentsRegister();
+
+ return inlineCallFrame->argumentsRegister;
+ }
+
+ VirtualRegister machineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return machineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ VirtualRegister uncheckedArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock->uncheckedArgumentsRegister();
+
+ CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ if (!codeBlock->usesArguments())
+ return VirtualRegister();
+
+ return VirtualRegister(codeBlock->argumentsRegister().offset() +
+ inlineCallFrame->stackOffset);
+ }
+
+ VirtualRegister uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return uncheckedArgumentsRegisterFor(codeOrigin.inlineCallFrame);
}
VirtualRegister activationRegister()
@@ -419,8 +527,59 @@ public:
return m_profiledBlock->uncheckedActivationRegister();
}
- ValueProfile* valueProfileFor(Node*);
- MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node*);
+ ValueProfile* valueProfileFor(Node* node)
+ {
+ if (!node)
+ return 0;
+
+ CodeBlock* profiledBlock = baselineCodeBlockFor(node->codeOrigin);
+
+ if (node->op() == GetArgument)
+ return profiledBlock->valueProfileForArgument(node->local().toArgument());
+
+ if (node->hasLocal(*this)) {
+ if (m_form == SSA)
+ return 0;
+ if (!node->local().isArgument())
+ return 0;
+ int argument = node->local().toArgument();
+ if (node->variableAccessData() != m_arguments[argument]->variableAccessData())
+ return 0;
+ return profiledBlock->valueProfileForArgument(argument);
+ }
+
+ if (node->hasHeapPrediction())
+ return profiledBlock->valueProfileForBytecodeOffset(node->codeOrigin.bytecodeIndex);
+
+ return 0;
+ }
+
+ MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* node)
+ {
+ if (!node)
+ return MethodOfGettingAValueProfile();
+
+ CodeBlock* profiledBlock = baselineCodeBlockFor(node->codeOrigin);
+
+ if (node->op() == GetLocal) {
+ return MethodOfGettingAValueProfile::fromLazyOperand(
+ profiledBlock,
+ LazyOperandValueProfileKey(
+ node->codeOrigin.bytecodeIndex, node->local()));
+ }
+
+ return MethodOfGettingAValueProfile(valueProfileFor(node));
+ }
+
+ bool needsActivation() const
+ {
+ return m_codeBlock->needsFullScopeChain() && m_codeBlock->codeType() != GlobalCode;
+ }
+
+ bool usesArguments() const
+ {
+ return m_codeBlock->usesArguments();
+ }
BlockIndex numBlocks() const { return m_blocks.size(); }
BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
@@ -434,7 +593,7 @@ public:
void killBlock(BlockIndex blockIndex)
{
- m_blocks[blockIndex] = nullptr;
+ m_blocks[blockIndex].clear();
}
void killBlock(BasicBlock* basicBlock)
@@ -446,10 +605,76 @@ public:
void killUnreachableBlocks();
+ bool isPredictedNumerical(Node* node)
+ {
+ return isNumerical(node->child1().useKind()) && isNumerical(node->child2().useKind());
+ }
+
+ // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing.
+ // It really means that it will not clobber the entire world. It's still up to you to
+ // carefully consider things like:
+ // - PutByVal definitely changes the array it stores to, and may even change its length.
+ // - PutByOffset definitely changes the object it stores to.
+ // - and so on.
+ bool byValIsPure(Node* node)
+ {
+ switch (node->arrayMode().type()) {
+ case Array::Generic:
+ return false;
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ case Array::ArrayStorage:
+ return !node->arrayMode().isOutOfBounds();
+ case Array::SlowPutArrayStorage:
+ return !node->arrayMode().mayStoreToHole();
+ case Array::String:
+ return node->op() == GetByVal && node->arrayMode().isInBounds();
+#if USE(JSVALUE32_64)
+ case Array::Arguments:
+ if (node->op() == GetByVal)
+ return true;
+ return false;
+#endif // USE(JSVALUE32_64)
+ default:
+ return true;
+ }
+ }
+
+ bool clobbersWorld(Node* node)
+ {
+ if (node->flags() & NodeClobbersWorld)
+ return true;
+ if (!(node->flags() & NodeMightClobber))
+ return false;
+ switch (node->op()) {
+ case GetByVal:
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias:
+ return !byValIsPure(node);
+ case ToString:
+ switch (node->child1().useKind()) {
+ case StringObjectUse:
+ case StringOrStringObjectUse:
+ return false;
+ case CellUse:
+ case UntypedUse:
+ return true;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return true;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
+ }
+ }
+
void determineReachability();
void resetReachability();
- void computeRefCounts();
+ void resetExitStates();
unsigned varArgNumChildren(Node* node)
{
@@ -477,7 +702,7 @@ public:
return node->children.child(index);
}
- void voteNode(Node* node, unsigned ballot, float weight = 1)
+ void voteNode(Node* node, unsigned ballot)
{
switch (node->op()) {
case ValueToInt32:
@@ -489,35 +714,35 @@ public:
}
if (node->op() == GetLocal)
- node->variableAccessData()->vote(ballot, weight);
+ node->variableAccessData()->vote(ballot);
}
- void voteNode(Edge edge, unsigned ballot, float weight = 1)
+ void voteNode(Edge edge, unsigned ballot)
{
- voteNode(edge.node(), ballot, weight);
+ voteNode(edge.node(), ballot);
}
- void voteChildren(Node* node, unsigned ballot, float weight = 1)
+ void voteChildren(Node* node, unsigned ballot)
{
if (node->flags() & NodeHasVarArgs) {
for (unsigned childIdx = node->firstChild();
childIdx < node->firstChild() + node->numChildren();
childIdx++) {
if (!!m_varArgChildren[childIdx])
- voteNode(m_varArgChildren[childIdx], ballot, weight);
+ voteNode(m_varArgChildren[childIdx], ballot);
}
return;
}
if (!node->child1())
return;
- voteNode(node->child1(), ballot, weight);
+ voteNode(node->child1(), ballot);
if (!node->child2())
return;
- voteNode(node->child2(), ballot, weight);
+ voteNode(node->child2(), ballot);
if (!node->child3())
return;
- voteNode(node->child3(), ballot, weight);
+ voteNode(node->child3(), ballot);
}
template<typename T> // T = Node* or Edge
@@ -552,246 +777,32 @@ public:
void invalidateCFG();
- void clearFlagsOnAllNodes(NodeFlags);
-
void clearReplacements();
- void clearEpochs();
void initializeNodeOwners();
- BlockList blocksInPreOrder();
- BlockList blocksInPostOrder();
-
- class NaturalBlockIterable {
- public:
- NaturalBlockIterable()
- : m_graph(nullptr)
- {
- }
-
- NaturalBlockIterable(Graph& graph)
- : m_graph(&graph)
- {
- }
-
- class iterator {
- public:
- iterator()
- : m_graph(nullptr)
- , m_index(0)
- {
- }
-
- iterator(Graph& graph, BlockIndex index)
- : m_graph(&graph)
- , m_index(findNext(index))
- {
- }
-
- BasicBlock *operator*()
- {
- return m_graph->block(m_index);
- }
-
- iterator& operator++()
- {
- m_index = findNext(m_index + 1);
- return *this;
- }
-
- bool operator==(const iterator& other) const
- {
- return m_index == other.m_index;
- }
-
- bool operator!=(const iterator& other) const
- {
- return !(*this == other);
- }
-
- private:
- BlockIndex findNext(BlockIndex index)
- {
- while (index < m_graph->numBlocks() && !m_graph->block(index))
- index++;
- return index;
- }
-
- Graph* m_graph;
- BlockIndex m_index;
- };
-
- iterator begin()
- {
- return iterator(*m_graph, 0);
- }
-
- iterator end()
- {
- return iterator(*m_graph, m_graph->numBlocks());
- }
-
- private:
- Graph* m_graph;
- };
-
- NaturalBlockIterable blocksInNaturalOrder()
- {
- return NaturalBlockIterable(*this);
- }
-
- template<typename ChildFunctor>
- void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
- {
- DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
- }
-
- template<typename ChildFunctor>
- void doToChildren(Node* node, const ChildFunctor& functor)
- {
- doToChildrenWithNode(
- node,
- [&functor] (Node*, Edge& edge) {
- functor(edge);
- });
- }
-
- bool uses(Node* node, Node* child)
- {
- bool result = false;
- doToChildren(node, [&] (Edge edge) { result |= edge == child; });
- return result;
- }
+ void getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result);
Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
-
- // Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
- // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
- // what's going on.
- bool watchCondition(const ObjectPropertyCondition&);
-
- // Checks if it's known that loading from the given object at the given offset is fine. This is
- // computed by tracking which conditions we track with watchCondition().
- bool isSafeToLoad(JSObject* base, PropertyOffset);
+ DesiredStructureChains& chains() { return m_plan.chains; }
FullBytecodeLiveness& livenessFor(CodeBlock*);
FullBytecodeLiveness& livenessFor(InlineCallFrame*);
-
- // Quickly query if a single local is live at the given point. This is faster than calling
- // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
- // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
bool isLiveInBytecode(VirtualRegister, CodeOrigin);
- // Quickly get all of the non-argument locals live at the given point. This doesn't give you
- // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
- // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
- template<typename Functor>
- void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
- {
- // Support for not redundantly reporting arguments. Necessary because in case of a varargs
- // call, only the callee knows that arguments are live while in the case of a non-varargs
- // call, both callee and caller will see the variables live.
- VirtualRegister exclusionStart;
- VirtualRegister exclusionEnd;
-
- for (;;) {
- InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
- VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);
-
- if (inlineCallFrame) {
- if (inlineCallFrame->isClosureCall)
- functor(stackOffset + JSStack::Callee);
- if (inlineCallFrame->isVarargs())
- functor(stackOffset + JSStack::ArgumentCount);
- }
-
- CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
- FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
- const FastBitVector& liveness = fullLiveness.getLiveness(codeOrigin.bytecodeIndex);
- for (unsigned relativeLocal = codeBlock->m_numCalleeRegisters; relativeLocal--;) {
- VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);
-
- // Don't report if our callee already reported.
- if (reg >= exclusionStart && reg < exclusionEnd)
- continue;
-
- if (liveness.get(relativeLocal))
- functor(reg);
- }
-
- if (!inlineCallFrame)
- break;
-
- // Arguments are always live. This would be redundant if it wasn't for our
- // op_call_varargs inlining. See the comment above.
- exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0);
- exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->arguments.size());
-
- // We will always have a "this" argument and exclusionStart should be a smaller stack
- // offset than exclusionEnd.
- ASSERT(exclusionStart < exclusionEnd);
-
- for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
- functor(reg);
-
- codeOrigin = inlineCallFrame->caller;
- }
- }
-
- // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
- // you want to compare two sets of live locals from two different CodeOrigins.
- BitVector localsLiveInBytecode(CodeOrigin);
-
- // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
- // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
- template<typename Functor>
- void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
- {
- forAllLocalsLiveInBytecode(codeOrigin, functor);
-
- // Report all arguments as being live.
- for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
- functor(virtualRegisterForArgument(argument));
- }
-
- BytecodeKills& killsFor(CodeBlock*);
- BytecodeKills& killsFor(InlineCallFrame*);
-
unsigned frameRegisterCount();
- unsigned stackPointerOffset();
unsigned requiredRegisterCountForExit();
unsigned requiredRegisterCountForExecutionAndExit();
- JSValue tryGetConstantProperty(JSValue base, const StructureSet&, PropertyOffset);
- JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
- JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
- JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);
-
- JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset);
- JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset);
- JSValue tryGetConstantClosureVar(Node*, ScopeOffset);
-
- JSArrayBufferView* tryGetFoldableView(JSValue);
- JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode);
-
- void registerFrozenValues();
+ JSActivation* tryGetActivation(Node*);
+ WriteBarrierBase<Unknown>* tryGetRegisters(Node*);
- virtual void visitChildren(SlotVisitor&) override;
+ JSArrayBufferView* tryGetFoldableView(Node*);
+ JSArrayBufferView* tryGetFoldableView(Node*, ArrayMode);
+ JSArrayBufferView* tryGetFoldableViewForChild1(Node*);
- NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
- std::nullptr_t, const char* file, int line, const char* function,
- const char* assertion);
- NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
- Node*, const char* file, int line, const char* function,
- const char* assertion);
- NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
- BasicBlock*, const char* file, int line, const char* function,
- const char* assertion);
-
- bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; }
-
VM& m_vm;
Plan& m_plan;
CodeBlock* m_codeBlock;
@@ -799,105 +810,54 @@ public:
NodeAllocator& m_allocator;
+ Operands<AbstractValue> m_mustHandleAbstractValues;
+
Vector< RefPtr<BasicBlock> , 8> m_blocks;
Vector<Edge, 16> m_varArgChildren;
-
- HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
- Bag<FrozenValue> m_frozenValues;
-
- Vector<uint32_t> m_uint32ValuesInUse;
-
- Bag<StorageAccessData> m_storageAccessData;
-
- // In CPS, this is all of the SetArgument nodes for the arguments in the machine code block
- // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
- // nodes.
- //
- // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that
- // may have some speculation in the prologue and survived DCE. Note that to get the speculation
- // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate
- // even if the argument got killed. For example:
- //
- // function foo(x) {
- // var tmp = x + 1;
- // }
- //
- // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
- // have a proven check for the edge to "x". So, we will not insert a Check node and we will
- // kill the GetStack for "x". But, we must do the int check in the progolue, because that's the
- // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
- //
- // var o = {
- // valueOf: function() { do side effects; }
- // };
- // foo(o);
- //
- // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
- // effects.
+ Vector<StorageAccessData> m_storageAccessData;
Vector<Node*, 8> m_arguments;
-
- // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in.
- Vector<FlushFormat> m_argumentFormats;
-
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
SegmentedVector<StructureSet, 16> m_structureSet;
- Bag<Transition> m_transitions;
+ SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
- Bag<BranchData> m_branchData;
- Bag<SwitchData> m_switchData;
- Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
- Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
- Bag<ObjectMaterializationData> m_objectMaterializationData;
- Bag<CallVarargsData> m_callVarargsData;
- Bag<LoadVarargsData> m_loadVarargsData;
- Bag<StackAccessData> m_stackAccessData;
+ SegmentedVector<SwitchData, 4> m_switchData;
Vector<InlineVariableData, 4> m_inlineVariableData;
+ OwnPtr<InlineCallFrameSet> m_inlineCallFrames;
HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
- HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>> m_bytecodeKills;
- HashSet<std::pair<JSObject*, PropertyOffset>> m_safeToLoad;
+ bool m_hasArguments;
+ HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
+ BitVector m_lazyVars;
Dominators m_dominators;
- PrePostNumbering m_prePostNumbering;
NaturalLoops m_naturalLoops;
unsigned m_localVars;
unsigned m_nextMachineLocal;
unsigned m_parameterSlots;
-
-#if USE(JSVALUE32_64)
- std::unordered_map<int64_t, double*> m_doubleConstantsMap;
- std::unique_ptr<Bag<double>> m_doubleConstants;
-#endif
+ int m_machineCaptureStart;
+ std::unique_ptr<SlowArgument[]> m_slowArguments;
OptimizationFixpointState m_fixpointState;
- StructureRegistrationState m_structureRegistrationState;
GraphForm m_form;
UnificationState m_unificationState;
- PlanStage m_planStage { PlanStage::Initial };
RefCountState m_refCountState;
- bool m_hasDebuggerEnabled;
private:
void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);
+ void addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock*);
- AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node*immediate, RareCaseProfilingSource source)
+ AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate)
{
ASSERT(immediate->hasConstant());
- JSValue immediateValue = immediate->asJSValue();
- if (!immediateValue.isNumber() && !immediateValue.isBoolean())
+ JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
+ if (!immediateValue.isNumber())
return DontSpeculateInt32;
if (!variableShouldSpeculateInt32)
return DontSpeculateInt32;
-
- // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
- // In that case, we stay conservative unless the other operand was explicitly typed as integer.
- NodeFlags operandResultType = operand->result();
- if (operandResultType != NodeResultInt32 && immediateValue.isDouble())
- return DontSpeculateInt32;
- if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32())
- return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;
+ if (immediateValue.isInt32())
+ return add->canSpeculateInt32() ? SpeculateInt32 : DontSpeculateInt32;
double doubleImmediate = immediateValue.asDouble();
const double twoToThe48 = 281474976710656.0;
@@ -906,8 +866,62 @@ private:
return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
}
+
+ bool mulImmediateShouldSpeculateInt32(Node* mul, Node* variable, Node* immediate)
+ {
+ ASSERT(immediate->hasConstant());
+
+ JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
+ if (!immediateValue.isInt32())
+ return false;
+
+ if (!variable->shouldSpeculateInt32ForArithmetic())
+ return false;
+
+ int32_t intImmediate = immediateValue.asInt32();
+ // Doubles have a 53 bit mantissa so we expect a multiplication of 2^31 (the highest
+ // magnitude possible int32 value) and any value less than 2^22 to not result in any
+ // rounding in a double multiplication - hence it will be equivalent to an integer
+ // multiplication, if we are doing int32 truncation afterwards (which is what
+ // canSpeculateInt32() implies).
+ const int32_t twoToThe22 = 1 << 22;
+ if (intImmediate <= -twoToThe22 || intImmediate >= twoToThe22)
+ return mul->canSpeculateInt32() && !nodeMayOverflow(mul->arithNodeFlags());
+
+ return mul->canSpeculateInt32();
+ }
};
+#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
+ Node* _node = (node); \
+ if (_node->flags() & NodeHasVarArgs) { \
+ for (unsigned _childIdx = _node->firstChild(); \
+ _childIdx < _node->firstChild() + _node->numChildren(); \
+ _childIdx++) { \
+ if (!!(graph).m_varArgChildren[_childIdx]) \
+ thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
+ } \
+ } else { \
+ if (!_node->child1()) { \
+ ASSERT( \
+ !_node->child2() \
+ && !_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child1()); \
+ \
+ if (!_node->child2()) { \
+ ASSERT(!_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child2()); \
+ \
+ if (!_node->child3()) \
+ break; \
+ thingToDo(_node, _node->child3()); \
+ } \
+ } while (false)
+
} } // namespace JSC::DFG
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp b/Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp
deleted file mode 100644
index e021e99bc..000000000
--- a/Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGGraphSafepoint.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-GraphSafepoint::GraphSafepoint(Graph& graph, Safepoint::Result& result)
- : m_safepoint(graph.m_plan, result)
-{
- m_safepoint.add(&graph);
- m_safepoint.begin();
-}
-
-GraphSafepoint::~GraphSafepoint() { }
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp b/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp
deleted file mode 100644
index 2ca344a53..000000000
--- a/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGHeapLocation.h"
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-void HeapLocation::dump(PrintStream& out) const
-{
- out.print(m_kind, ":", m_heap);
-
- if (!m_base)
- return;
-
- out.print("[", m_base);
- if (m_index)
- out.print(", ", m_index);
- out.print("]");
-}
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-using namespace JSC::DFG;
-
-void printInternal(PrintStream& out, LocationKind kind)
-{
- switch (kind) {
- case InvalidLocationKind:
- out.print("InvalidLocationKind");
- return;
-
- case InvalidationPointLoc:
- out.print("InvalidationPointLoc");
- return;
-
- case IsObjectOrNullLoc:
- out.print("IsObjectOrNullLoc");
- return;
-
- case IsFunctionLoc:
- out.print("IsFunctionLoc");
- return;
-
- case GetterLoc:
- out.print("GetterLoc");
- return;
-
- case SetterLoc:
- out.print("SetterLoc");
- return;
-
- case StackLoc:
- out.print("StackLoc");
- return;
-
- case StackPayloadLoc:
- out.print("StackPayloadLoc");
- return;
-
- case ArrayLengthLoc:
- out.print("ArrayLengthLoc");
- return;
-
- case ButterflyLoc:
- out.print("ButterflyLoc");
- return;
-
- case CheckHasInstanceLoc:
- out.print("CheckHasInstanceLoc");
- return;
-
- case ClosureVariableLoc:
- out.print("ClosureVariableLoc");
- return;
-
- case DirectArgumentsLoc:
- out.print("DirectArgumentsLoc");
- return;
-
- case GlobalVariableLoc:
- out.print("GlobalVariableLoc");
- return;
-
- case HasIndexedPropertyLoc:
- out.print("HasIndexedPorpertyLoc");
- return;
-
- case IndexedPropertyLoc:
- out.print("IndexedPorpertyLoc");
- return;
-
- case IndexedPropertyStorageLoc:
- out.print("IndexedPropertyStorageLoc");
- return;
-
- case InstanceOfLoc:
- out.print("InstanceOfLoc");
- return;
-
- case NamedPropertyLoc:
- out.print("NamedPropertyLoc");
- return;
-
- case TypedArrayByteOffsetLoc:
- out.print("TypedArrayByteOffsetLoc");
- return;
-
- case VarInjectionWatchpointLoc:
- out.print("VarInjectionWatchpointLoc");
- return;
-
- case StructureLoc:
- out.print("StructureLoc");
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGHeapLocation.h b/Source/JavaScriptCore/dfg/DFGHeapLocation.h
deleted file mode 100644
index 3d3a94ccf..000000000
--- a/Source/JavaScriptCore/dfg/DFGHeapLocation.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGHeapLocation_h
-#define DFGHeapLocation_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGAbstractHeap.h"
-#include "DFGLazyNode.h"
-#include "DFGNode.h"
-
-namespace JSC { namespace DFG {
-
-enum LocationKind {
- InvalidLocationKind,
-
- ArrayLengthLoc,
- ButterflyLoc,
- CheckHasInstanceLoc,
- ClosureVariableLoc,
- DirectArgumentsLoc,
- GetterLoc,
- GlobalVariableLoc,
- HasIndexedPropertyLoc,
- IndexedPropertyLoc,
- IndexedPropertyStorageLoc,
- InstanceOfLoc,
- InvalidationPointLoc,
- IsFunctionLoc,
- IsObjectOrNullLoc,
- NamedPropertyLoc,
- SetterLoc,
- StructureLoc,
- TypedArrayByteOffsetLoc,
- VarInjectionWatchpointLoc,
- StackLoc,
- StackPayloadLoc
-};
-
-class HeapLocation {
-public:
- HeapLocation(
- LocationKind kind = InvalidLocationKind,
- AbstractHeap heap = AbstractHeap(),
- Node* base = nullptr, LazyNode index = LazyNode())
- : m_kind(kind)
- , m_heap(heap)
- , m_base(base)
- , m_index(index)
- {
- ASSERT((kind == InvalidLocationKind) == !heap);
- ASSERT(!!m_heap || !m_base);
- ASSERT(m_base || !m_index);
- }
-
- HeapLocation(LocationKind kind, AbstractHeap heap, Node* base, Node* index)
- : HeapLocation(kind, heap, base, LazyNode(index))
- {
- }
-
- HeapLocation(LocationKind kind, AbstractHeap heap, Edge base, Edge index = Edge())
- : HeapLocation(kind, heap, base.node(), index.node())
- {
- }
-
- HeapLocation(WTF::HashTableDeletedValueType)
- : m_kind(InvalidLocationKind)
- , m_heap(WTF::HashTableDeletedValue)
- , m_base(nullptr)
- , m_index(nullptr)
- {
- }
-
- bool operator!() const { return !m_heap; }
-
- LocationKind kind() const { return m_kind; }
- AbstractHeap heap() const { return m_heap; }
- Node* base() const { return m_base; }
- LazyNode index() const { return m_index; }
-
- unsigned hash() const
- {
- return m_kind + m_heap.hash() + m_index.hash() + m_kind;
- }
-
- bool operator==(const HeapLocation& other) const
- {
- return m_kind == other.m_kind
- && m_heap == other.m_heap
- && m_base == other.m_base
- && m_index == other.m_index;
- }
-
- bool isHashTableDeletedValue() const
- {
- return m_heap.isHashTableDeletedValue();
- }
-
- void dump(PrintStream& out) const;
-
-private:
- LocationKind m_kind;
- AbstractHeap m_heap;
- Node* m_base;
- LazyNode m_index;
-};
-
-struct HeapLocationHash {
- static unsigned hash(const HeapLocation& key) { return key.hash(); }
- static bool equal(const HeapLocation& a, const HeapLocation& b) { return a == b; }
- static const bool safeToCompareToEmptyOrDeleted = true;
-};
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-void printInternal(PrintStream&, JSC::DFG::LocationKind);
-
-template<typename T> struct DefaultHash;
-template<> struct DefaultHash<JSC::DFG::HeapLocation> {
- typedef JSC::DFG::HeapLocationHash Hash;
-};
-
-template<typename T> struct HashTraits;
-template<> struct HashTraits<JSC::DFG::HeapLocation> : SimpleClassHashTraits<JSC::DFG::HeapLocation> {
- static const bool emptyValueIsZero = false;
-};
-
-} // namespace WTF
-
-namespace JSC { namespace DFG {
-
-typedef HashMap<HeapLocation, LazyNode> ImpureMap;
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGHeapLocation_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp
index b1269c1b7..468c68f84 100644
--- a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,14 +31,12 @@
#include "CodeBlock.h"
#include "DFGBasicBlock.h"
#include "GetByIdStatus.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "PutByIdStatus.h"
#include "StringObject.h"
namespace JSC { namespace DFG {
-static const bool verbose = false;
-
InPlaceAbstractState::InPlaceAbstractState(Graph& graph)
: m_graph(graph)
, m_variables(m_graph.m_codeBlock->numParameters(), graph.m_localVars)
@@ -60,20 +58,36 @@ void InPlaceAbstractState::beginBasicBlock(BasicBlock* basicBlock)
forNode(basicBlock->at(i)).clear();
m_variables = basicBlock->valuesAtHead;
+ m_haveStructures = false;
+ for (size_t i = 0; i < m_variables.numberOfArguments(); ++i) {
+ if (m_variables.argument(i).hasClobberableState()) {
+ m_haveStructures = true;
+ break;
+ }
+ }
+ for (size_t i = 0; i < m_variables.numberOfLocals(); ++i) {
+ if (m_variables.local(i).hasClobberableState()) {
+ m_haveStructures = true;
+ break;
+ }
+ }
if (m_graph.m_form == SSA) {
HashMap<Node*, AbstractValue>::iterator iter = basicBlock->ssa->valuesAtHead.begin();
HashMap<Node*, AbstractValue>::iterator end = basicBlock->ssa->valuesAtHead.end();
- for (; iter != end; ++iter)
+ for (; iter != end; ++iter) {
forNode(iter->key) = iter->value;
+ if (iter->value.hasClobberableState())
+ m_haveStructures = true;
+ }
}
+
basicBlock->cfaShouldRevisit = false;
basicBlock->cfaHasVisited = true;
m_block = basicBlock;
m_isValid = true;
m_foundConstants = false;
m_branchDirection = InvalidBranchDirection;
- m_structureClobberState = basicBlock->cfaStructureClobberStateAtHead;
}
static void setLiveValues(HashMap<Node*, AbstractValue>& values, HashSet<Node*>& live)
@@ -92,44 +106,37 @@ void InPlaceAbstractState::initialize()
root->cfaShouldRevisit = true;
root->cfaHasVisited = false;
root->cfaFoundConstants = false;
- root->cfaStructureClobberStateAtHead = StructuresAreWatched;
- root->cfaStructureClobberStateAtTail = StructuresAreWatched;
for (size_t i = 0; i < root->valuesAtHead.numberOfArguments(); ++i) {
root->valuesAtTail.argument(i).clear();
-
- FlushFormat format;
- if (m_graph.m_form == SSA)
- format = m_graph.m_argumentFormats[i];
- else {
- Node* node = m_graph.m_arguments[i];
- if (!node)
- format = FlushedJSValue;
- else {
- ASSERT(node->op() == SetArgument);
- format = node->variableAccessData()->flushFormat();
- }
+ if (m_graph.m_form == SSA) {
+ root->valuesAtHead.argument(i).makeHeapTop();
+ continue;
+ }
+
+ Node* node = root->variablesAtHead.argument(i);
+ ASSERT(node->op() == SetArgument);
+ if (!node->variableAccessData()->shouldUnboxIfPossible()) {
+ root->valuesAtHead.argument(i).makeHeapTop();
+ continue;
}
- switch (format) {
- case FlushedInt32:
+ SpeculatedType prediction =
+ node->variableAccessData()->argumentAwarePrediction();
+ if (isInt32Speculation(prediction))
root->valuesAtHead.argument(i).setType(SpecInt32);
- break;
- case FlushedBoolean:
+ else if (isBooleanSpeculation(prediction))
root->valuesAtHead.argument(i).setType(SpecBoolean);
- break;
- case FlushedCell:
- root->valuesAtHead.argument(i).setType(m_graph, SpecCell);
- break;
- case FlushedJSValue:
+ else if (isCellSpeculation(prediction))
+ root->valuesAtHead.argument(i).setType(SpecCell);
+ else
root->valuesAtHead.argument(i).makeHeapTop();
- break;
- default:
- DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
- break;
- }
}
for (size_t i = 0; i < root->valuesAtHead.numberOfLocals(); ++i) {
- root->valuesAtHead.local(i).clear();
+ Node* node = root->variablesAtHead.local(i);
+ if (node && node->variableAccessData()->isCaptured())
+ root->valuesAtHead.local(i).makeHeapTop();
+ else
+ root->valuesAtHead.local(i).clear();
root->valuesAtTail.local(i).clear();
}
for (BlockIndex blockIndex = 1 ; blockIndex < m_graph.numBlocks(); ++blockIndex) {
@@ -140,8 +147,6 @@ void InPlaceAbstractState::initialize()
block->cfaShouldRevisit = false;
block->cfaHasVisited = false;
block->cfaFoundConstants = false;
- block->cfaStructureClobberStateAtHead = StructuresAreWatched;
- block->cfaStructureClobberStateAtTail = StructuresAreWatched;
for (size_t i = 0; i < block->valuesAtHead.numberOfArguments(); ++i) {
block->valuesAtHead.argument(i).clear();
block->valuesAtTail.argument(i).clear();
@@ -150,6 +155,16 @@ void InPlaceAbstractState::initialize()
block->valuesAtHead.local(i).clear();
block->valuesAtTail.local(i).clear();
}
+ if (!block->isOSRTarget)
+ continue;
+ if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
+ continue;
+ for (size_t i = 0; i < m_graph.m_mustHandleAbstractValues.size(); ++i) {
+ AbstractValue value = m_graph.m_mustHandleAbstractValues[i];
+ int operand = m_graph.m_mustHandleAbstractValues.operandForIndex(i);
+ block->valuesAtHead.operand(operand).merge(value);
+ }
+ block->cfaShouldRevisit = true;
}
if (m_graph.m_form == SSA) {
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
@@ -179,9 +194,7 @@ bool InPlaceAbstractState::endBasicBlock(MergeMode mergeMode)
bool changed = false;
- if ((mergeMode != DontMerge) || !ASSERT_DISABLED) {
- changed |= checkAndSet(block->cfaStructureClobberStateAtTail, m_structureClobberState);
-
+ if (mergeMode != DontMerge || !ASSERT_DISABLED) {
switch (m_graph.m_form) {
case ThreadedCPS: {
for (size_t argument = 0; argument < block->variablesAtTail.numberOfArguments(); ++argument) {
@@ -229,7 +242,6 @@ void InPlaceAbstractState::reset()
m_block = 0;
m_isValid = false;
m_branchDirection = InvalidBranchDirection;
- m_structureClobberState = StructuresAreWatched;
}
bool InPlaceAbstractState::mergeStateAtTail(AbstractValue& destination, AbstractValue& inVariable, Node* node)
@@ -239,31 +251,46 @@ bool InPlaceAbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
AbstractValue source;
- switch (node->op()) {
- case Phi:
- case SetArgument:
- case PhantomLocal:
- case Flush:
- // The block transfers the value from head to tail.
+ if (node->variableAccessData()->isCaptured()) {
+ // If it's captured then we know that whatever value was stored into the variable last is the
+ // one we care about. This is true even if the variable at tail is dead, which might happen if
+ // the last thing we did to the variable was a GetLocal and then ended up now using the
+ // GetLocal's result.
+
source = inVariable;
- break;
+ } else {
+ switch (node->op()) {
+ case Phi:
+ case SetArgument:
+ case PhantomLocal:
+ case Flush:
+ // The block transfers the value from head to tail.
+ source = inVariable;
+ break;
- case GetLocal:
- // The block refines the value with additional speculations.
- source = forNode(node);
- break;
+ case GetLocal:
+ // The block refines the value with additional speculations.
+ source = forNode(node);
+ break;
- case SetLocal:
- // The block sets the variable, and potentially refines it, both
- // before and after setting it.
- source = forNode(node->child1());
- if (node->variableAccessData()->flushFormat() == FlushedDouble)
- RELEASE_ASSERT(!(source.m_type & ~SpecFullDouble));
- break;
+ case SetLocal:
+ // The block sets the variable, and potentially refines it, both
+ // before and after setting it.
+ source = forNode(node->child1());
+ if (node->variableAccessData()->flushFormat() == FlushedDouble) {
+ ASSERT(!(source.m_type & ~SpecFullNumber));
+ ASSERT(!!(source.m_type & ~SpecDouble) == !!(source.m_type & SpecMachineInt));
+ if (!(source.m_type & ~SpecDouble)) {
+ source.merge(SpecInt52AsDouble);
+ source.filter(SpecDouble);
+ }
+ }
+ break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
}
if (destination == source) {
@@ -281,17 +308,11 @@ bool InPlaceAbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to)
{
- if (verbose)
- dataLog(" Merging from ", pointerDump(from), " to ", pointerDump(to), "\n");
ASSERT(from->variablesAtTail.numberOfArguments() == to->variablesAtHead.numberOfArguments());
ASSERT(from->variablesAtTail.numberOfLocals() == to->variablesAtHead.numberOfLocals());
bool changed = false;
- changed |= checkAndSet(
- to->cfaStructureClobberStateAtHead,
- DFG::merge(from->cfaStructureClobberStateAtTail, to->cfaStructureClobberStateAtHead));
-
switch (m_graph.m_form) {
case ThreadedCPS: {
for (size_t argument = 0; argument < from->variablesAtTail.numberOfArguments(); ++argument) {
@@ -314,12 +335,8 @@ bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to)
HashSet<Node*>::iterator end = to->ssa->liveAtHead.end();
for (; iter != end; ++iter) {
Node* node = *iter;
- if (verbose)
- dataLog(" Merging for ", node, ": from ", from->ssa->valuesAtTail.find(node)->value, " to ", to->ssa->valuesAtHead.find(node)->value, "\n");
changed |= to->ssa->valuesAtHead.find(node)->value.merge(
from->ssa->valuesAtTail.find(node)->value);
- if (verbose)
- dataLog(" Result: ", to->ssa->valuesAtHead.find(node)->value, "\n");
}
break;
}
@@ -332,8 +349,6 @@ bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to)
if (!to->cfaHasVisited)
changed = true;
- if (verbose)
- dataLog(" Will revisit: ", changed, "\n");
to->cfaShouldRevisit |= changed;
return changed;
@@ -341,23 +356,23 @@ bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to)
inline bool InPlaceAbstractState::mergeToSuccessors(BasicBlock* basicBlock)
{
- Node* terminal = basicBlock->terminal();
+ Node* terminal = basicBlock->last();
ASSERT(terminal->isTerminal());
switch (terminal->op()) {
case Jump: {
ASSERT(basicBlock->cfaBranchDirection == InvalidBranchDirection);
- return merge(basicBlock, terminal->targetBlock());
+ return merge(basicBlock, terminal->takenBlock());
}
case Branch: {
ASSERT(basicBlock->cfaBranchDirection != InvalidBranchDirection);
bool changed = false;
if (basicBlock->cfaBranchDirection != TakeFalse)
- changed |= merge(basicBlock, terminal->branchData()->taken.block);
+ changed |= merge(basicBlock, terminal->takenBlock());
if (basicBlock->cfaBranchDirection != TakeTrue)
- changed |= merge(basicBlock, terminal->branchData()->notTaken.block);
+ changed |= merge(basicBlock, terminal->notTakenBlock());
return changed;
}
@@ -366,9 +381,9 @@ inline bool InPlaceAbstractState::mergeToSuccessors(BasicBlock* basicBlock)
// we're not. However I somehow doubt that this will ever be a big deal.
ASSERT(basicBlock->cfaBranchDirection == InvalidBranchDirection);
SwitchData* data = terminal->switchData();
- bool changed = merge(basicBlock, data->fallThrough.block);
+ bool changed = merge(basicBlock, data->fallThrough);
for (unsigned i = data->cases.size(); i--;)
- changed |= merge(basicBlock, data->cases[i].target.block);
+ changed |= merge(basicBlock, data->cases[i].target);
return changed;
}
diff --git a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h
index 201444c86..f0f2a46d5 100644
--- a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h
+++ b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h
@@ -26,6 +26,8 @@
#ifndef DFGInPlaceAbstractState_h
#define DFGInPlaceAbstractState_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAbstractValue.h"
@@ -37,9 +39,8 @@
namespace JSC { namespace DFG {
class InPlaceAbstractState {
- WTF_MAKE_FAST_ALLOCATED;
public:
- InPlaceAbstractState(Graph&);
+ InPlaceAbstractState(Graph& graph);
~InPlaceAbstractState();
@@ -104,9 +105,6 @@ public:
// Did the last executed node clobber the world?
bool didClobber() const { return m_didClobber; }
- // Are structures currently clobbered?
- StructureClobberState structureClobberState() const { return m_structureClobberState; }
-
// Is the execution state still valid? This will be false if execute() has
// returned false previously.
bool isValid() const { return m_isValid; }
@@ -126,16 +124,11 @@ public:
// Methods intended to be called from AbstractInterpreter.
void setDidClobber(bool didClobber) { m_didClobber = didClobber; }
- void setStructureClobberState(StructureClobberState value) { m_structureClobberState = value; }
void setIsValid(bool isValid) { m_isValid = isValid; }
void setBranchDirection(BranchDirection branchDirection) { m_branchDirection = branchDirection; }
-
- // This method is evil - it causes a huge maintenance headache and there is a gross amount of
- // code devoted to it. It would be much nicer to just always run the constant folder on each
- // block. But, the last time we did it, it was a 1% SunSpider regression:
- // https://bugs.webkit.org/show_bug.cgi?id=133947
- // So, we should probably keep this method.
void setFoundConstants(bool foundConstants) { m_foundConstants = foundConstants; }
+ bool haveStructures() const { return m_haveStructures; } // It's always safe to return true.
+ void setHaveStructures(bool haveStructures) { m_haveStructures = haveStructures; }
private:
bool mergeStateAtTail(AbstractValue& destination, AbstractValue& inVariable, Node*);
@@ -147,11 +140,11 @@ private:
Operands<AbstractValue> m_variables;
BasicBlock* m_block;
+ bool m_haveStructures;
bool m_foundConstants;
bool m_isValid;
bool m_didClobber;
- StructureClobberState m_structureClobberState;
BranchDirection m_branchDirection; // This is only set for blocks that end in Branch and that execute to completion (i.e. m_isValid == true).
};
diff --git a/Source/JavaScriptCore/dfg/DFGInsertionSet.h b/Source/JavaScriptCore/dfg/DFGInsertionSet.h
index c5ed4c207..8d76c4566 100644
--- a/Source/JavaScriptCore/dfg/DFGInsertionSet.h
+++ b/Source/JavaScriptCore/dfg/DFGInsertionSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGInsertionSet_h
#define DFGInsertionSet_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
@@ -43,8 +45,6 @@ public:
{
}
- Graph& graph() { return m_graph; }
-
Node* insert(const Insertion& insertion)
{
ASSERT(!m_insertions.size() || m_insertions.last().index() <= insertion.index());
@@ -57,93 +57,13 @@ public:
return insert(Insertion(index, element));
}
- template<typename... Params>
- Node* insertNode(size_t index, SpeculatedType type, Params... params)
- {
- return insert(index, m_graph.addNode(type, params...));
- }
-
- Node* insertConstant(
- size_t index, NodeOrigin origin, FrozenValue* value,
- NodeType op = JSConstant)
- {
- return insertNode(
- index, speculationFromValue(value->value()), op, origin, OpInfo(value));
- }
-
- Node* insertConstant(
- size_t index, CodeOrigin origin, FrozenValue* value, NodeType op = JSConstant)
- {
- return insertConstant(index, NodeOrigin(origin), value, op);
- }
-
- Edge insertConstantForUse(
- size_t index, NodeOrigin origin, FrozenValue* value, UseKind useKind)
- {
- NodeType op;
- if (isDouble(useKind))
- op = DoubleConstant;
- else if (useKind == Int52RepUse)
- op = Int52Constant;
- else
- op = JSConstant;
- return Edge(insertConstant(index, origin, value, op), useKind);
- }
-
- Edge insertConstantForUse(
- size_t index, CodeOrigin origin, FrozenValue* value, UseKind useKind)
- {
- return insertConstantForUse(index, NodeOrigin(origin), value, useKind);
- }
-
- Node* insertConstant(size_t index, NodeOrigin origin, JSValue value, NodeType op = JSConstant)
- {
- return insertConstant(index, origin, m_graph.freeze(value), op);
- }
-
- Node* insertConstant(size_t index, CodeOrigin origin, JSValue value, NodeType op = JSConstant)
- {
- return insertConstant(index, origin, m_graph.freeze(value), op);
- }
-
- Edge insertConstantForUse(size_t index, NodeOrigin origin, JSValue value, UseKind useKind)
- {
- return insertConstantForUse(index, origin, m_graph.freeze(value), useKind);
- }
-
- Edge insertConstantForUse(size_t index, CodeOrigin origin, JSValue value, UseKind useKind)
- {
- return insertConstantForUse(index, NodeOrigin(origin), value, useKind);
- }
-
- Edge insertBottomConstantForUse(size_t index, NodeOrigin origin, UseKind useKind)
- {
- if (isDouble(useKind))
- return insertConstantForUse(index, origin, jsNumber(PNaN), useKind);
- if (useKind == Int52RepUse)
- return insertConstantForUse(index, origin, jsNumber(0), useKind);
- return insertConstantForUse(index, origin, jsUndefined(), useKind);
- }
-
- Node* insertCheck(size_t index, NodeOrigin origin, AdjacencyList children)
- {
- children = children.justChecks();
- if (children.isEmpty())
- return nullptr;
- return insertNode(index, SpecNone, Check, origin, children);
- }
-
- Node* insertCheck(size_t index, Node* node)
- {
- return insertCheck(index, node->origin, node->children);
- }
-
- Node* insertCheck(size_t index, NodeOrigin origin, Edge edge)
- {
- if (edge.willHaveCheck())
- return insertNode(index, SpecNone, Check, origin, edge);
- return nullptr;
+#define DFG_DEFINE_INSERT_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
+ templatePre typeParams templatePost Node* insertNode(size_t index, SpeculatedType type valueParamsComma valueParams) \
+ { \
+ return insert(index, m_graph.addNode(type valueParamsComma valueArgs)); \
}
+ DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_INSERT_NODE)
+#undef DFG_DEFINE_INSERT_NODE
void execute(BasicBlock* block)
{
diff --git a/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.cpp b/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.cpp
deleted file mode 100644
index 5ddda089d..000000000
--- a/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.cpp
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGIntegerCheckCombiningPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGPhase.h"
-#include "DFGPredictionPropagationPhase.h"
-#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
-#include <unordered_map>
-#include <wtf/HashMethod.h>
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-static const bool verbose = false;
-
-enum RangeKind {
- InvalidRangeKind,
-
- // This means we did ArithAdd with CheckOverflow.
- Addition,
-
- // This means we did CheckInBounds on some length.
- ArrayBounds
-};
-
-struct RangeKey {
- RangeKey()
- : m_kind(InvalidRangeKind)
- , m_key(nullptr)
- {
- }
-
- static RangeKey addition(Edge edge)
- {
- RangeKey result;
- result.m_kind = Addition;
- result.m_source = edge.sanitized();
- result.m_key = 0;
- return result;
- }
-
- static RangeKey arrayBounds(Edge edge, Node* key)
- {
- RangeKey result;
- result.m_kind = ArrayBounds;
- result.m_source = edge.sanitized();
- result.m_key = key;
- return result;
- }
-
- bool operator!() const { return m_kind == InvalidRangeKind; }
-
- unsigned hash() const
- {
- return m_kind + m_source.hash() + PtrHash<Node*>::hash(m_key);
- }
-
- bool operator==(const RangeKey& other) const
- {
- return m_kind == other.m_kind
- && m_source == other.m_source
- && m_key == other.m_key;
- }
-
- void dump(PrintStream& out) const
- {
- switch (m_kind) {
- case InvalidRangeKind:
- out.print("InvalidRangeKind(");
- break;
- case Addition:
- out.print("Addition(");
- break;
- case ArrayBounds:
- out.print("ArrayBounds(");
- break;
- }
- out.print(m_source, ", ", m_key, ")");
- }
-
- RangeKind m_kind;
- Edge m_source;
- Node* m_key;
-};
-
-struct RangeKeyAndAddend {
- RangeKeyAndAddend()
- : m_addend(0)
- {
- }
-
- RangeKeyAndAddend(RangeKey key, int32_t addend)
- : m_key(key)
- , m_addend(addend)
- {
- }
-
- bool operator!() const { return !m_key && !m_addend; }
-
- void dump(PrintStream& out) const
- {
- out.print(m_key, " + ", m_addend);
- }
-
- RangeKey m_key;
- int32_t m_addend;
-};
-
-struct Range {
- Range()
- : m_minBound(0)
- , m_maxBound(0)
- , m_count(0)
- , m_hoisted(false)
- {
- }
-
- void dump(PrintStream& out) const
- {
- out.print("(", m_minBound, " @", m_minOrigin, ") .. (", m_maxBound, " @", m_maxOrigin, "), count = ", m_count, ", hoisted = ", m_hoisted);
- }
-
- int32_t m_minBound;
- CodeOrigin m_minOrigin;
- int32_t m_maxBound;
- CodeOrigin m_maxOrigin;
- unsigned m_count; // If this is zero then the bounds won't necessarily make sense.
- bool m_hoisted;
-};
-
-} // anonymous namespace
-
-class IntegerCheckCombiningPhase : public Phase {
-public:
- IntegerCheckCombiningPhase(Graph& graph)
- : Phase(graph, "integer check combining")
- , m_insertionSet(graph)
- {
- }
-
- bool run()
- {
- ASSERT(m_graph.m_form == SSA);
-
- m_changed = false;
-
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;)
- handleBlock(blockIndex);
-
- return m_changed;
- }
-
-private:
- void handleBlock(BlockIndex blockIndex)
- {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- return;
-
- m_map.clear();
-
- // First we collect Ranges. If operations within the range have enough redundancy,
- // we hoist. And then we remove additions and checks that fall within the max range.
-
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- RangeKeyAndAddend data = rangeKeyAndAddend(node);
- if (verbose)
- dataLog("For ", node, ": ", data, "\n");
- if (!data)
- continue;
-
- Range& range = m_map[data.m_key];
- if (verbose)
- dataLog(" Range: ", range, "\n");
- if (range.m_count) {
- if (data.m_addend > range.m_maxBound) {
- range.m_maxBound = data.m_addend;
- range.m_maxOrigin = node->origin.semantic;
- } else if (data.m_addend < range.m_minBound) {
- range.m_minBound = data.m_addend;
- range.m_minOrigin = node->origin.semantic;
- }
- } else {
- range.m_maxBound = data.m_addend;
- range.m_minBound = data.m_addend;
- range.m_minOrigin = node->origin.semantic;
- range.m_maxOrigin = node->origin.semantic;
- }
- range.m_count++;
- if (verbose)
- dataLog(" New range: ", range, "\n");
- }
-
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- RangeKeyAndAddend data = rangeKeyAndAddend(node);
- if (!data)
- continue;
- Range range = m_map[data.m_key];
- if (!isValid(data.m_key, range))
- continue;
-
- // Do the hoisting.
- if (!range.m_hoisted) {
- switch (data.m_key.m_kind) {
- case Addition: {
- if (range.m_minBound < 0) {
- insertAdd(
- nodeIndex, NodeOrigin(range.m_minOrigin, node->origin.forExit),
- data.m_key.m_source, range.m_minBound);
- }
- if (range.m_maxBound > 0) {
- insertAdd(
- nodeIndex, NodeOrigin(range.m_maxOrigin, node->origin.forExit),
- data.m_key.m_source, range.m_maxBound);
- }
- break;
- }
-
- case ArrayBounds: {
- Node* minNode;
- Node* maxNode;
-
- if (!data.m_key.m_source) {
- minNode = 0;
- maxNode = m_insertionSet.insertConstant(
- nodeIndex, range.m_maxOrigin, jsNumber(range.m_maxBound));
- } else {
- minNode = insertAdd(
- nodeIndex, NodeOrigin(range.m_minOrigin, node->origin.forExit),
- data.m_key.m_source, range.m_minBound, Arith::Unchecked);
- maxNode = insertAdd(
- nodeIndex, NodeOrigin(range.m_maxOrigin, node->origin.forExit),
- data.m_key.m_source, range.m_maxBound, Arith::Unchecked);
- }
-
- if (minNode) {
- m_insertionSet.insertNode(
- nodeIndex, SpecNone, CheckInBounds, node->origin,
- Edge(minNode, Int32Use), Edge(data.m_key.m_key, Int32Use));
- }
- m_insertionSet.insertNode(
- nodeIndex, SpecNone, CheckInBounds, node->origin,
- Edge(maxNode, Int32Use), Edge(data.m_key.m_key, Int32Use));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- m_changed = true;
- m_map[data.m_key].m_hoisted = true;
- }
-
- // Do the elimination.
- switch (data.m_key.m_kind) {
- case Addition:
- node->setArithMode(Arith::Unchecked);
- m_changed = true;
- break;
-
- case ArrayBounds:
- node->remove();
- m_changed = true;
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
-
- m_insertionSet.execute(block);
- }
-
- RangeKeyAndAddend rangeKeyAndAddend(Node* node)
- {
- switch (node->op()) {
- case ArithAdd: {
- if (node->arithMode() != Arith::CheckOverflow
- && node->arithMode() != Arith::CheckOverflowAndNegativeZero)
- break;
- if (!node->child2()->isInt32Constant())
- break;
- return RangeKeyAndAddend(
- RangeKey::addition(node->child1()),
- node->child2()->asInt32());
- }
-
- case CheckInBounds: {
- Edge source;
- int32_t addend;
- Node* key = node->child2().node();
-
- Edge index = node->child1();
-
- if (index->isInt32Constant()) {
- source = Edge();
- addend = index->asInt32();
- } else if (
- index->op() == ArithAdd
- && index->isBinaryUseKind(Int32Use)
- && index->child2()->isInt32Constant()) {
- source = index->child1();
- addend = index->child2()->asInt32();
- } else {
- source = index;
- addend = 0;
- }
-
- return RangeKeyAndAddend(RangeKey::arrayBounds(source, key), addend);
- }
-
- default:
- break;
- }
-
- return RangeKeyAndAddend();
- }
-
- bool isValid(const RangeKey& key, const Range& range)
- {
- if (range.m_count < 2)
- return false;
-
- switch (key.m_kind) {
- case ArrayBounds: {
- // Have to do this carefully because C++ compilers are too smart. But all we're really doing is detecting if
- // the difference between the bounds is 2^31 or more. If it was, then we'd have to worry about wrap-around.
- // The way we'd like to write this expression is (range.m_maxBound - range.m_minBound) >= 0, but that is a
- // signed subtraction and compare, which allows the C++ compiler to do anything it wants in case of
- // wrap-around.
- uint32_t maxBound = range.m_maxBound;
- uint32_t minBound = range.m_minBound;
- uint32_t unsignedDifference = maxBound - minBound;
- return !(unsignedDifference >> 31);
- }
-
- default:
- return true;
- }
- }
-
- Node* insertAdd(
- unsigned nodeIndex, NodeOrigin origin, Edge source, int32_t addend,
- Arith::Mode arithMode = Arith::CheckOverflow)
- {
- if (!addend)
- return source.node();
- return m_insertionSet.insertNode(
- nodeIndex, source->prediction(), source->result(),
- ArithAdd, origin, OpInfo(arithMode), source,
- m_insertionSet.insertConstantForUse(
- nodeIndex, origin, jsNumber(addend), source.useKind()));
- }
-
- typedef std::unordered_map<RangeKey, Range, HashMethod<RangeKey>> RangeMap;
- RangeMap m_map;
-
- InsertionSet m_insertionSet;
- bool m_changed;
-};
-
-bool performIntegerCheckCombining(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Integer Check Combining Phase");
- return runPhase<IntegerCheckCombiningPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.cpp b/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.cpp
deleted file mode 100644
index e2dca7ca7..000000000
--- a/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.cpp
+++ /dev/null
@@ -1,1729 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGIntegerRangeOptimizationPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBlockMapInlines.h"
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGPhase.h"
-#include "DFGPredictionPropagationPhase.h"
-#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-const bool verbose = false;
-
-int64_t clampedSumImpl() { return 0; }
-
-template<typename... Args>
-int64_t clampedSumImpl(int left, Args... args)
-{
- return static_cast<int64_t>(left) + clampedSumImpl(args...);
-}
-
-template<typename... Args>
-int clampedSum(Args... args)
-{
- int64_t result = clampedSumImpl(args...);
- return static_cast<int>(std::min(
- static_cast<int64_t>(std::numeric_limits<int>::max()),
- std::max(
- static_cast<int64_t>(std::numeric_limits<int>::min()),
- result)));
-}
-
-bool isGeneralOffset(int offset)
-{
- return offset >= -1 && offset <= 1;
-}
-
-class Relationship {
-public:
- enum Kind {
- LessThan,
- Equal,
- NotEqual,
- GreaterThan
- };
-
- // Some relationships provide more information than others. When a relationship provides more
- // information, it is less vague.
- static unsigned vagueness(Kind kind)
- {
- switch (kind) {
- case Equal:
- return 0;
- case LessThan:
- case GreaterThan:
- return 1;
- case NotEqual:
- return 2;
- }
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
-
- static const unsigned minVagueness = 0;
- static const unsigned maxVagueness = 2;
-
- static Kind flipped(Kind kind)
- {
- switch (kind) {
- case LessThan:
- return GreaterThan;
- case Equal:
- return Equal;
- case NotEqual:
- return NotEqual;
- case GreaterThan:
- return LessThan;
- }
- RELEASE_ASSERT_NOT_REACHED();
- return kind;
- }
-
- Relationship()
- : m_left(nullptr)
- , m_right(nullptr)
- , m_kind(Equal)
- , m_offset(0)
- {
- }
-
- Relationship(Node* left, Node* right, Kind kind, int offset = 0)
- : m_left(left)
- , m_right(right)
- , m_kind(kind)
- , m_offset(offset)
- {
- RELEASE_ASSERT(m_left);
- RELEASE_ASSERT(m_right);
- RELEASE_ASSERT(m_left != m_right);
- }
-
- static Relationship safeCreate(Node* left, Node* right, Kind kind, int offset = 0)
- {
- if (!left || !right || left == right)
- return Relationship();
- return Relationship(left, right, kind, offset);
- }
-
- explicit operator bool() const { return m_left; }
-
- Node* left() const { return m_left; }
- Node* right() const { return m_right; }
- Kind kind() const { return m_kind; }
- int offset() const { return m_offset; }
-
- unsigned vagueness() const { return vagueness(kind()); }
-
- Relationship flipped() const
- {
- if (!*this)
- return Relationship();
-
- // This should return Relationship() if -m_offset overflows. For example:
- //
- // @a > @b - 2**31
- //
- // If we flip it we get:
- //
- // @b < @a + 2**31
- //
- // Except that the sign gets flipped since it's INT_MIN:
- //
- // @b < @a - 2**31
- //
- // And that makes no sense. To see how little sense it makes, consider:
- //
- // @a > @zero - 2**31
- //
- // We would flip it to mean:
- //
- // @zero < @a - 2**31
- //
- // Which is absurd.
-
- if (m_offset == std::numeric_limits<int>::min())
- return Relationship();
-
- return Relationship(m_right, m_left, flipped(m_kind), -m_offset);
- }
-
- Relationship inverse() const
- {
- if (!*this)
- return *this;
-
- switch (m_kind) {
- case Equal:
- return Relationship(m_left, m_right, NotEqual, m_offset);
- case NotEqual:
- return Relationship(m_left, m_right, Equal, m_offset);
- case LessThan:
- if (sumOverflows<int>(m_offset, -1))
- return Relationship();
- return Relationship(m_left, m_right, GreaterThan, m_offset - 1);
- case GreaterThan:
- if (sumOverflows<int>(m_offset, 1))
- return Relationship();
- return Relationship(m_left, m_right, LessThan, m_offset + 1);
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- bool isCanonical() const { return m_left < m_right; }
-
- Relationship canonical() const
- {
- if (isCanonical())
- return *this;
- return flipped();
- }
-
- bool sameNodesAs(const Relationship& other) const
- {
- return m_left == other.m_left
- && m_right == other.m_right;
- }
-
- bool operator==(const Relationship& other) const
- {
- return sameNodesAs(other)
- && m_kind == other.m_kind
- && m_offset == other.m_offset;
- }
-
- bool operator!=(const Relationship& other) const
- {
- return !(*this == other);
- }
-
- bool operator<(const Relationship& other) const
- {
- if (m_left != other.m_left)
- return m_left < other.m_left;
- if (m_right != other.m_right)
- return m_right < other.m_right;
- if (m_kind != other.m_kind)
- return m_kind < other.m_kind;
- return m_offset < other.m_offset;
- }
-
- // If possible, returns a form of this relationship where the given node is the left
- // side. Returns a null relationship if this relationship cannot say anything about this
- // node.
- Relationship forNode(Node* node) const
- {
- if (m_left == node)
- return *this;
- if (m_right == node)
- return flipped();
- return Relationship();
- }
-
- void setLeft(Node* left)
- {
- RELEASE_ASSERT(left != m_right);
- m_left = left;
- }
- bool addToOffset(int offset)
- {
- if (sumOverflows<int>(m_offset, offset))
- return false;
- m_offset += offset;
- return true;
- }
-
// Attempts to create relationships that summarize the union of this relationship and
// the other relationship. Relationships are returned by calling the functor with the newly
// created relationships. No relationships are created to indicate TOP. This is used
// for merging the current relationship-at-head for some pair of nodes and a new
// relationship-at-head being proposed by a predecessor. We wish to create a new
// relationship that is true whenever either of them are true, while ensuring that we don't
// do this forever. Anytime we create a relationship that is not equal to either of the
// previous ones, we will cause the analysis fixpoint to reexecute.
//
// If *this and other are identical, we just pass it to the functor.
//
// If they are different, we pick from a finite set of "general" relationships:
//
// Eq: this == other + C, where C is -1, 0, or 1.
// Lt: this < other + C, where C is -1, 0, or 1.
// Gt: this > other + C, where C is -1, 0, or 1.
// Ne: this != other + C, where C is -1, 0, or 1.
// TOP: the null relationship.
//
// Constraining C to -1,0,1 is necessary to ensure that the set of general relationships is
// finite. This finite set of relationships forms a pretty simple lattice where a
// relA->relB means "relB is more general than relA". For example, this<other+1 is more
// general than this==other. I'll leave it as an exercise for the reader to see that a
// graph between the 13 general relationships is indeed a lattice. The fact that the set of
// general relationships is a finite lattice ensures monotonicity of the fixpoint, since
// any merge over not-identical relationships returns a relationship that is closer to the
// TOP relationship than either of the original relationships. Here's how convergence is
// achieved for any pair of relationships over the same nodes:
//
// - If they are identical, then returning *this means that we won't be responsible for
//   causing another fixpoint iteration. Once all merges reach this point, we're done.
//
// - If they are different, then we pick the most constraining of the 13 general
//   relationships that is true if either *this or other are true. This means that if the
//   relationships are not identical, the merged relationship will be closer to TOP than
//   either of the originals. Returning a different relationship means that we will be
//   responsible for the fixpoint to reloop, but we can only do this at most 13 times since
//   that's how "deep" the general relationship lattice is.
//
// Note that C being constrained to -1,0,1 also ensures that we never have to return a
// combination of Lt and Gt, as in for example this<other+C && this>other-D. The only possible
// values of C and D where this would work are -1 and 1, but in that case we just say
// this==other. That said, the logic for merging two == relationships, like this==other+C ||
// this==other+D is to attempt to create these two relationships: this>other+min(C,D)-1 &&
// this<other+max(C,D)+1. But only one of these relationships will belong to the set of general
// relationships.
//
// Here's an example of this in action:
//
//     for (var i = a; ; ++i) { }
//
// Without C being constrained to -1,0,1, we could end up looping forever: first we'd say
// that i==a, then we might say that i<a+2, then i<a+3, then i<a+4, etc. We won't do this
// because i<a+2 is not a valid general relationship: so when we merge i==a from the first
// iteration and i==a+1 from the second iteration, we create i>a-1 and i<a+2 but then
// realize that only i>a-1 is a valid general relationship. This gives us exactly what we
// want: a statement that i>=a.
//
// However, this may return a pair of relationships when merging relationships involving
// constants. For example, if given:
//
//     @x == @c
//     @x == @d
//
// where @c and @d are constants, then this may pass two relationships to the functor:
//
//     @x > min(@c, @d) - 1
//     @x < max(@c, @d) + 1
//
// This still allows for convergence, because just as when merging relationships over
// variables, this always picks from a set of general relationships. Hence although this may
// produce two relationships as a result of the merge, the total number of relationships that
// can be present at head of block is limited by O(graph.size^2).
template<typename Functor>
void merge(const Relationship& other, const Functor& functor) const
{
    // Handle the super obvious case first.
    if (*this == other) {
        functor(*this);
        return;
    }

    // Merging relationships over different left nodes is always TOP.
    if (m_left != other.m_left)
        return;

    // Different right nodes: the only useful merges are when both rights are constants.
    if (m_right != other.m_right) {
        mergeConstantsImpl(other, functor);
        return;
    }

    ASSERT(sameNodesAs(other));

    // This does some interesting permutations to reduce the amount of duplicate code. For
    // example:
    //
    // initially: @a != @b, @a > @b
    //            @b != @a, @b < @a
    //            @b < @a, @b != @a
    // finally:   @b != @a, @b < @a
    //
    // Another example:
    //
    // initially: @a < @b, @a != @b
    // finally:   @a != @b, @a < @b

    Relationship a = *this;
    Relationship b = other;
    bool needFlip = false;

    // Get rid of GreaterThan.
    if (a.m_kind == GreaterThan || b.m_kind == GreaterThan) {
        a = a.flipped();
        b = b.flipped();

        // In rare cases, we might not be able to flip. Just give up on life in those
        // cases.
        if (!a || !b)
            return;

        needFlip = true;

        // If we still have GreaterThan, then it means that we started with @a < @b and
        // @a > @b. That's pretty much always a tautology; we don't attempt to do smart
        // things for that case for now.
        if (a.m_kind == GreaterThan || b.m_kind == GreaterThan)
            return;
    }

    // Make sure that if we have a LessThan, then it's first.
    if (b.m_kind == LessThan)
        std::swap(a, b);

    // Make sure that if we have a NotEqual, then it's first.
    if (b.m_kind == NotEqual)
        std::swap(a, b);

    Relationship result = a.mergeImpl(b);
    if (!result)
        return;

    if (needFlip)
        result = result.flipped();

    functor(result);
}
-
// Attempts to construct one Relationship that adequately summarizes the intersection of
// this and other. Returns a null relationship if the filtration should be expressed as two
// different relationships. Returning null is always safe because relationship lists in
// this phase always imply intersection. So, you could soundly skip calling this method and
// just put both relationships into the list. But, that could lead the fixpoint to diverge.
// Hence this will attempt to combine the two relationships into one as a convergence hack.
// In some cases, it will do something conservative. It's always safe for this to return
// *this, or to return other. It'll do that sometimes, mainly to accelerate convergence for
// things that we don't think are important enough to slow down the analysis.
Relationship filter(const Relationship& other) const
{
    // We are only interested in merging relationships over the same nodes.
    ASSERT(sameNodesAs(other));

    if (*this == other)
        return *this;

    // From here we can assume that the two relationships are not identical. Usually we use
    // this to assume that we have different offsets anytime that everything but the offset
    // is identical.

    // We want equality to take precedent over everything else, and we don't want multiple
    // independent claims of equality. That would just be a contradiction. When it does
    // happen, we will be conservative in the sense that we will pick one.
    if (m_kind == Equal)
        return *this;
    if (other.m_kind == Equal)
        return other;

    // Useful helper for flipping.
    auto filterFlipped = [&] () -> Relationship {
        // If we cannot flip, then just conservatively return *this.
        Relationship a = flipped();
        Relationship b = other.flipped();
        if (!a || !b)
            return *this;
        Relationship result = a.filter(b);
        if (!result)
            return Relationship();
        result = result.flipped();
        if (!result)
            return *this;
        return result;
    };

    if (m_kind == NotEqual) {
        if (other.m_kind == NotEqual) {
            // We could do something smarter here. We could even keep both NotEqual's. We
            // would need to make sure that we correctly collapsed them when merging. But
            // for now, we just pick one of them and hope for the best.
            return *this;
        }

        if (other.m_kind == GreaterThan) {
            // Implement this in terms of NotEqual.filter(LessThan).
            return filterFlipped();
        }

        ASSERT(other.m_kind == LessThan);
        // We have two claims:
        //     @a != @b + C
        //     @a < @b + D
        //
        // If C >= D, then the NotEqual is redundant.
        // If C < D - 1, then we could keep both, but for now we just keep the LessThan.
        // If C == D - 1, then the LessThan can be turned into:
        //
        //     @a < @b + C
        //
        // Note that C == this.m_offset, D == other.m_offset.

        if (m_offset == other.m_offset - 1)
            return Relationship(m_left, m_right, LessThan, m_offset);

        return other;
    }

    if (other.m_kind == NotEqual)
        return other.filter(*this);

    if (m_kind == LessThan) {
        if (other.m_kind == LessThan) {
            return Relationship(
                m_left, m_right, LessThan, std::min(m_offset, other.m_offset));
        }

        ASSERT(other.m_kind == GreaterThan);
        // @a < @b + C together with @a > @b + D pins down a single value only when
        // C - 1 == D + 1, in which case @a == @b + (C - 1). Guard the arithmetic first.
        if (sumOverflows<int>(m_offset, -1))
            return Relationship();
        if (sumOverflows<int>(other.m_offset, 1))
            return Relationship();
        if (m_offset - 1 == other.m_offset + 1)
            return Relationship(m_left, m_right, Equal, m_offset - 1);

        return Relationship();
    }

    ASSERT(m_kind == GreaterThan);
    return filterFlipped();
}
-
- // Come up with a relationship that is the best description of this && other, provided that left() is
- // the same and right() is a constant. Also requires that this is at least as vague as other. It may
- // return this or it may return something else, but whatever it returns, it will have the same nodes as
- // this. This is not automatically done by filter() because it currently only makes sense to call this
- // during a very particular part of setOneSide().
- Relationship filterConstant(const Relationship& other) const
- {
- ASSERT(m_left == other.m_left);
- ASSERT(m_right->isInt32Constant());
- ASSERT(other.m_right->isInt32Constant());
- ASSERT(vagueness() >= other.vagueness());
-
- if (vagueness() == other.vagueness())
- return *this;
-
- int thisRight = m_right->asInt32();
- int otherRight = other.m_right->asInt32();
-
- // Ignore funny business.
- if (sumOverflows<int>(otherRight, other.m_offset))
- return *this;
-
- int otherEffectiveRight = otherRight + other.m_offset;
-
- switch (other.m_kind) {
- case Equal:
- // Return a version of *this that is Equal to other's constant.
- return Relationship(m_left, m_right, Equal, otherEffectiveRight - thisRight);
-
- case LessThan:
- case GreaterThan:
- ASSERT(m_kind == NotEqual);
- // We could do smart things here. But we don't currently have an example of when it would be
- // valuable. Note that you have to be careful. We could refine NotEqual to LessThan, but only
- // if the LessThan subsumes the NotEqual.
- return *this;
-
- case NotEqual:
- RELEASE_ASSERT_NOT_REACHED();
- return Relationship();
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return Relationship();
- }
-
- int minValueOfLeft() const
- {
- if (m_left->isInt32Constant())
- return m_left->asInt32();
-
- if (m_kind == LessThan || m_kind == NotEqual)
- return std::numeric_limits<int>::min();
-
- int minRightValue = std::numeric_limits<int>::min();
- if (m_right->isInt32Constant())
- minRightValue = m_right->asInt32();
-
- if (m_kind == GreaterThan)
- return clampedSum(minRightValue, m_offset, 1);
- ASSERT(m_kind == Equal);
- return clampedSum(minRightValue, m_offset);
- }
-
- int maxValueOfLeft() const
- {
- if (m_left->isInt32Constant())
- return m_left->asInt32();
-
- if (m_kind == GreaterThan || m_kind == NotEqual)
- return std::numeric_limits<int>::max();
-
- int maxRightValue = std::numeric_limits<int>::max();
- if (m_right->isInt32Constant())
- maxRightValue = m_right->asInt32();
-
- if (m_kind == LessThan)
- return clampedSum(maxRightValue, m_offset, -1);
- ASSERT(m_kind == Equal);
- return clampedSum(maxRightValue, m_offset);
- }
-
- void dump(PrintStream& out) const
- {
- // This prints out the relationship without any whitespace, like @x<@y+42. This
- // optimizes for the clarity of a list of relationships. It's easier to read something
- // like [@1<@2+3, @4==@5-6] than it would be if there was whitespace inside the
- // relationships.
-
- if (!*this) {
- out.print("null");
- return;
- }
-
- out.print(m_left);
- switch (m_kind) {
- case LessThan:
- out.print("<");
- break;
- case Equal:
- out.print("==");
- break;
- case NotEqual:
- out.print("!=");
- break;
- case GreaterThan:
- out.print(">");
- break;
- }
- out.print(m_right);
- if (m_offset > 0)
- out.print("+", m_offset);
- else if (m_offset < 0)
- out.print("-", -static_cast<int64_t>(m_offset));
- }
-
-private:
// Merges two non-identical relationships over the same nodes, with GreaterThan already
// eliminated and ordering normalized by merge() (NotEqual first, then LessThan).
// Returns the single general relationship implied by (*this || other), or null for TOP.
Relationship mergeImpl(const Relationship& other) const
{
    ASSERT(sameNodesAs(other));
    ASSERT(m_kind != GreaterThan);
    ASSERT(other.m_kind != GreaterThan);
    ASSERT(*this != other);

    // The purpose of this method is to guarantee that:
    //
    // - We avoid having more than one Relationship over the same two nodes. Therefore, if
    //   the merge could be expressed as two Relationships, we prefer to instead pick the
    //   less precise single Relationship form even if that means TOP.
    //
    // - If the difference between two Relationships is just the m_offset, then we create a
    //   Relationship that has an offset of -1, 0, or 1. This is an essential convergence
    //   hack. We need -1 and 1 to support <= and >=.

    // From here we can assume that the two relationships are not identical. Usually we use
    // this to assume that we have different offsets anytime that everything but the offset
    // is identical.

    if (m_kind == NotEqual) {
        if (other.m_kind == NotEqual)
            return Relationship(); // Different offsets, so tautology.

        if (other.m_kind == Equal) {
            if (m_offset != other.m_offset) {
                // Saying that you might be B when you've already said that you're anything
                // but A, where A and B are different, is a tautology. You could just say
                // that you're anything but A. Adding "(a == b + 1)" to "(a != b + 5)" has
                // no value.
                return *this;
            }
            // Otherwise, same offsets: we're saying that you're either A or you're not
            // equal to A.

            return Relationship();
        }

        RELEASE_ASSERT(other.m_kind == LessThan);
        // We have these claims, and we're merging them:
        //     @a != @b + C
        //     @a < @b + D
        //
        // If we have C == D, then the merge is clearly just the NotEqual.
        // If we have C < D, then the merge is a tautology.
        // If we have C > D, then we could keep both claims, but we are cheap, so we
        // don't. We just use the NotEqual.

        if (m_offset < other.m_offset)
            return Relationship();

        return *this;
    }

    if (m_kind == LessThan) {
        if (other.m_kind == LessThan) {
            // Figure out what offset to select to merge them. The appropriate offsets are
            // -1, 0, or 1.

            // First figure out what offset we'd like to use.
            int bestOffset = std::max(m_offset, other.m_offset);

            // We have something like @a < @b + 2. We can't represent this under the
            // -1,0,1 rule.
            if (isGeneralOffset(bestOffset))
                return Relationship(m_left, m_right, LessThan, std::max(bestOffset, -1));

            return Relationship();
        }

        // The only thing left is Equal. We would have eliminated the GreaterThan's, and
        // if we merge LessThan and NotEqual, the NotEqual always comes first.
        RELEASE_ASSERT(other.m_kind == Equal);

        // This is the really interesting case. We have:
        //
        //     @a < @b + C
        //
        // and:
        //
        //     @a == @b + D
        //
        // Therefore we'd like to return:
        //
        //     @a < @b + max(C, D + 1)

        int bestOffset = std::max(m_offset, other.m_offset + 1);

        // We have something like @a < @b + 2. We can't do it.
        if (isGeneralOffset(bestOffset))
            return Relationship(m_left, m_right, LessThan, std::max(bestOffset, -1));

        return Relationship();
    }

    // The only thing left is Equal, since we would have gotten rid of the GreaterThan's.
    RELEASE_ASSERT(m_kind == Equal);

    // We would never see NotEqual, because those always come first. We would never
    // see GreaterThan, because we would have eliminated those. We would never see
    // LessThan, because those always come first.

    RELEASE_ASSERT(other.m_kind == Equal);
    // We have @a == @b + C and @a == @b + D, where C != D. Turn this into some
    // inequality that involves a constant that is -1,0,1. Note that we will never have
    // lessThan and greaterThan because the constants are constrained to -1,0,1. The only
    // way for both of them to be valid is a<b+1 and a>b-1, but then we would have said
    // a==b.

    Relationship lessThan;
    Relationship greaterThan;

    int lessThanEqOffset = std::max(m_offset, other.m_offset);
    if (lessThanEqOffset >= -2 && lessThanEqOffset <= 0) {
        lessThan = Relationship(
            m_left, other.m_right, LessThan, lessThanEqOffset + 1);

        ASSERT(isGeneralOffset(lessThan.offset()));
    }

    int greaterThanEqOffset = std::min(m_offset, other.m_offset);
    if (greaterThanEqOffset >= 0 && greaterThanEqOffset <= 2) {
        greaterThan = Relationship(
            m_left, other.m_right, GreaterThan, greaterThanEqOffset - 1);

        ASSERT(isGeneralOffset(greaterThan.offset()));
    }

    if (lessThan) {
        // Both relationships cannot be valid; see above.
        RELEASE_ASSERT(!greaterThan);

        return lessThan;
    }

    return greaterThan;
}
-
// Merges two relationships with the same left node but different right nodes, which is
// only useful when both rights are int32 constants. Resulting relationships are reported
// through the functor; reporting nothing means TOP.
template<typename Functor>
void mergeConstantsImpl(const Relationship& other, const Functor& functor) const
{
    ASSERT(m_left == other.m_left);

    // Only deal with constant right.
    if (!m_right->isInt32Constant() || !other.m_right->isInt32Constant())
        return;

    // What follows is a fairly conservative merge. We could tune this phase to come up with
    // all possible inequalities between variables and constants, but we focus mainly on cheap
    // cases for now.

    // Here are some of the arrangements we can merge usefully assuming @c < @d:
    //
    //     @x == @c || @x == @d   =>   @x >= c && @x <= @d
    //     @x >= @c || @x <= @d   =>   TOP
    //     @x == @c || @x != @d   =>   @x != @d

    int thisRight = m_right->asInt32();
    int otherRight = other.m_right->asInt32();

    // Ignore funny business.
    if (sumOverflows<int>(thisRight, m_offset))
        return;
    if (sumOverflows<int>(otherRight, other.m_offset))
        return;

    int thisEffectiveRight = thisRight + m_offset;
    int otherEffectiveRight = otherRight + other.m_offset;

    auto makeUpper = [&] (int64_t upper) {
        if (upper <= thisRight) {
            // We want m_right + offset to be equal to upper. Hence we want offset to cancel
            // with m_right. But there's more to it, since we want +1 to turn the LessThan into
            // a LessThanOrEqual, and we want to make sure we don't end up with non-general
            // offsets.
            int offset = static_cast<int>(std::max(
                static_cast<int64_t>(1) + upper - static_cast<int64_t>(thisRight),
                static_cast<int64_t>(-1)));
            functor(Relationship(m_left, m_right, LessThan, offset));
        }
        if (upper <= otherRight) {
            int offset = static_cast<int>(std::max(
                static_cast<int64_t>(1) + upper - static_cast<int64_t>(otherRight),
                static_cast<int64_t>(-1)));
            functor(Relationship(m_left, other.m_right, LessThan, offset));
        }
    };
    auto makeLower = [&] (int64_t lower) {
        if (lower >= thisRight) {
            // We want m_right + offset to be equal to lower. Hence we want offset to cancel with
            // m_right. But there's more to it, since we want -1 to turn the GreaterThan into a
            // GreaterThanOrEqual, and we want to make sure we don't end up with non-general
            // offsets.
            int offset = static_cast<int>(std::min(
                static_cast<int64_t>(-1) + lower - static_cast<int64_t>(thisRight),
                static_cast<int64_t>(1)));
            functor(Relationship(m_left, m_right, GreaterThan, offset));
        }
        if (lower >= otherRight) {
            int offset = static_cast<int>(std::min(
                static_cast<int64_t>(-1) + lower - static_cast<int64_t>(otherRight),
                static_cast<int64_t>(1)));
            functor(Relationship(m_left, other.m_right, GreaterThan, offset));
        }
    };

    switch (m_kind) {
    case Equal: {
        switch (other.m_kind) {
        case Equal: {
            if (thisEffectiveRight == otherEffectiveRight) {
                // This probably won't arise often. We can keep whichever relationship is general.
                if (isGeneralOffset(m_offset))
                    functor(*this);
                if (isGeneralOffset(other.m_offset))
                    functor(other);
                return;
            }

            // What follows is the only case where a merge will create more rules than what it
            // started with. This is fine for convergence because the LessThan/GreaterThan
            // rules that this creates are general (i.e. have small offsets) and they never
            // spawn more rules upon subsequent merging.

            makeUpper(std::max(thisEffectiveRight, otherEffectiveRight));
            makeLower(std::min(thisEffectiveRight, otherEffectiveRight));
            return;
        }

        case LessThan: {
            // Either the LessThan condition subsumes the equality, or the LessThan condition
            // and equality merge together to create a looser LessThan condition.

            // This is: @x == thisEffectiveRight
            // Other is: @x < otherEffectiveRight

            // We want to create @x <= upper. Figure out the value of upper.
            makeUpper(std::max(
                static_cast<int64_t>(thisEffectiveRight),
                static_cast<int64_t>(otherEffectiveRight) - 1));
            return;
        }

        case GreaterThan: {
            // Opposite of the LessThan case, above.

            // This is: @x == thisEffectiveRight
            // Other is: @x > otherEffectiveRight

            makeLower(std::min(
                static_cast<int64_t>(thisEffectiveRight),
                static_cast<int64_t>(otherEffectiveRight) + 1));
            return;
        }

        case NotEqual: {
            // We keep the NotEqual so long as it doesn't contradict our Equal.
            if (otherEffectiveRight == thisEffectiveRight)
                return;

            // But, we only keep the NotEqual if it is general. This simplifies reasoning about
            // convergence: merging never introduces a new rule unless that rule is general.
            if (!isGeneralOffset(other.m_offset))
                return;

            functor(other);
            return;
        } }

        RELEASE_ASSERT_NOT_REACHED();
        return;
    }

    case LessThan: {
        switch (other.m_kind) {
        case Equal: {
            // Equal/LessThan is handled above; re-dispatch with the roles swapped.
            other.mergeConstantsImpl(*this, functor);
            return;
        }

        case LessThan: {
            makeUpper(std::max(
                static_cast<int64_t>(thisEffectiveRight) - 1,
                static_cast<int64_t>(otherEffectiveRight) - 1));
            return;
        }

        case GreaterThan: {
            // We have a claim that @x > @c || @x < @d. If @d > @c, this is the tautology. If
            // @d <= @c, it's sort of uninteresting. Just ignore this.
            return;
        }

        case NotEqual: {
            // We have a claim that @x < @c || @x != @d. This isn't interesting.
            return;
        } }

        RELEASE_ASSERT_NOT_REACHED();
        return;
    }

    case GreaterThan: {
        switch (other.m_kind) {
        case Equal: {
            // Equal/GreaterThan is handled above; re-dispatch with the roles swapped.
            other.mergeConstantsImpl(*this, functor);
            return;
        }

        case LessThan: {
            // Not interesting, see above.
            return;
        }

        case GreaterThan: {
            makeLower(std::min(
                static_cast<int64_t>(thisEffectiveRight) + 1,
                static_cast<int64_t>(otherEffectiveRight) + 1));
            return;
        }

        case NotEqual: {
            // Not interesting, see above.
            return;
        } }

        RELEASE_ASSERT_NOT_REACHED();
        return;
    }

    case NotEqual: {
        // Only the Equal/NotEqual pairing is useful; it is handled above, so re-dispatch.
        if (other.m_kind == Equal)
            other.mergeConstantsImpl(*this, functor);
        return;
    } }

    RELEASE_ASSERT_NOT_REACHED();
}
-
Node* m_left; // Node on the left-hand side of the relation.
Node* m_right; // Node on the right-hand side of the relation.
Kind m_kind; // LessThan, Equal, NotEqual, or GreaterThan.
int m_offset; // This offset can be arbitrarily large.
-};
-
-typedef HashMap<Node*, Vector<Relationship>> RelationshipMap;
-
-class IntegerRangeOptimizationPhase : public Phase {
-public:
IntegerRangeOptimizationPhase(Graph& graph)
    : Phase(graph, "integer range optimization")
    , m_zero(nullptr) // Found or lazily inserted at the top of block 0 by run().
    , m_relationshipsAtHead(graph)
    , m_insertionSet(graph)
{
}
-
// Runs the analysis fixpoint and then the transformation pass.
// Returns true if any node was changed.
bool run()
{
    ASSERT(m_graph.m_form == SSA);

    // Before we do anything, make sure that we have a zero constant at the top.
    for (Node* node : *m_graph.block(0)) {
        if (node->isInt32Constant() && !node->asInt32()) {
            m_zero = node;
            break;
        }
    }
    if (!m_zero) {
        m_zero = m_insertionSet.insertConstant(0, NodeOrigin(), jsNumber(0));
        m_insertionSet.execute(m_graph.block(0));
    }

    if (verbose) {
        dataLog("Graph before integer range optimization:\n");
        m_graph.dump();
    }

    // This performs a fixpoint over the blocks in reverse post-order. Logically, we
    // maintain a list of relationships at each point in the program. The list should be
    // read as an intersection. For example if we have {rel1, rel2, ..., relN}, you should
    // read this as:
    //
    //     TOP && rel1 && rel2 && ... && relN
    //
    // This allows us to express things like:
    //
    //     @a > @b - 42 && @a < @b + 25
    //
    // But not things like:
    //
    //     @a < @b - 42 || @a > @b + 25
    //
    // We merge two lists by merging each relationship in one list with each relationship
    // in the other list. Merging two relationships will yield a relationship list; as with
    // all such lists it is an intersection. Merging relationships over different variables
    // always yields the empty list (i.e. TOP). This merge style is sound because if we
    // have:
    //
    //     (A && B && C) || (D && E && F)
    //
    // Then a valid merge is just one that will return true if A, B, C are all true, or
    // that will return true if D, E, F are all true. Our merge style essentially does:
    //
    //     (A || D) && (A || E) && (A || F) && (B || D) && (B || E) && (B || F) &&
    //     (C || D) && (C || E) && (C || F)
    //
    // If A && B && C is true, then this returns true. If D && E && F is true, this also
    // returns true.
    //
    // While this appears at first like a kind of expression explosion, in practice it
    // isn't. The code that handles this knows that the merge of two relationships over
    // different variables is TOP (i.e. the empty list). For example if A above is @a < @b
    // and B above is @c > @d, where @a, @b, @c, and @d are different nodes, the merge will
    // yield nothing. In fact, the merge algorithm will skip such merges entirely because
    // the relationship lists are actually keyed by node.
    //
    // Note that it's always safe to drop any relationship from the relationship list.
    // This merely increases the likelihood of the "expression" yielding true, i.e. being
    // closer to TOP. Optimizations are only performed if we can establish that the
    // expression implied by the relationship list is false for all of those cases where
    // some check would have failed.
    //
    // There is no notion of BOTTOM because we treat blocks that haven't had their
    // state-at-head set as a special case: we just transfer all live relationships to such
    // a block. After the head of a block is set, we perform the merging as above. In all
    // other places where we would ordinarily need BOTTOM, we approximate it by having some
    // non-BOTTOM relationship.

    BlockList postOrder = m_graph.blocksInPostOrder();

    // This loop analyzes the IR to give us m_relationshipsAtHead for each block. This
    // may reexecute blocks many times, but it is guaranteed to converge. The state of
    // the relationshipsAtHead over any pair of nodes converge monotonically towards the
    // TOP relationship (i.e. no relationships in the relationship list). The merge rule
    // when between the current relationshipsAtHead and the relationships being propagated
    // from a predecessor ensures monotonicity by converting disagreements into one of a
    // small set of "general" relationships. There are 12 such relationships, plus TOP. See
    // the comment above Relationship::merge() for details.
    bool changed = true;
    while (changed) {
        changed = false;
        for (unsigned postOrderIndex = postOrder.size(); postOrderIndex--;) {
            BasicBlock* block = postOrder[postOrderIndex];
            DFG_ASSERT(
                m_graph, nullptr,
                block == m_graph.block(0) || m_seenBlocks.contains(block));

            m_relationships = m_relationshipsAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                if (verbose)
                    dataLog("Analysis: at ", node, ": ", listDump(sortedRelationships()), "\n");
                executeNode(node);
            }

            // Now comes perhaps the most important piece of cleverness: if we Branch, and
            // the predicate involves some relation over integers, we propagate different
            // information to the taken and notTaken paths. This handles:
            // - Branch on int32.
            // - Branch on LogicalNot on int32.
            // - Branch on compare on int32's.
            // - Branch on LogicalNot of compare on int32's.
            Node* terminal = block->terminal();
            bool alreadyMerged = false;
            if (terminal->op() == Branch) {
                Relationship relationshipForTrue;
                BranchData* branchData = terminal->branchData();

                bool invert = false;
                if (terminal->child1()->op() == LogicalNot) {
                    terminal = terminal->child1().node();
                    invert = true;
                }

                if (terminal->child1().useKind() == Int32Use) {
                    // Branch on a bare int32: the true edge learns child1 != 0.
                    relationshipForTrue = Relationship::safeCreate(
                        terminal->child1().node(), m_zero, Relationship::NotEqual, 0);
                } else {
                    Node* compare = terminal->child1().node();
                    switch (compare->op()) {
                    case CompareEq:
                    case CompareStrictEq:
                    case CompareLess:
                    case CompareLessEq:
                    case CompareGreater:
                    case CompareGreaterEq: {
                        if (!compare->isBinaryUseKind(Int32Use))
                            break;

                        switch (compare->op()) {
                        case CompareEq:
                        case CompareStrictEq:
                            relationshipForTrue = Relationship::safeCreate(
                                compare->child1().node(), compare->child2().node(),
                                Relationship::Equal, 0);
                            break;
                        case CompareLess:
                            relationshipForTrue = Relationship::safeCreate(
                                compare->child1().node(), compare->child2().node(),
                                Relationship::LessThan, 0);
                            break;
                        case CompareLessEq:
                            // a <= b is expressed as a < b + 1.
                            relationshipForTrue = Relationship::safeCreate(
                                compare->child1().node(), compare->child2().node(),
                                Relationship::LessThan, 1);
                            break;
                        case CompareGreater:
                            relationshipForTrue = Relationship::safeCreate(
                                compare->child1().node(), compare->child2().node(),
                                Relationship::GreaterThan, 0);
                            break;
                        case CompareGreaterEq:
                            // a >= b is expressed as a > b - 1.
                            relationshipForTrue = Relationship::safeCreate(
                                compare->child1().node(), compare->child2().node(),
                                Relationship::GreaterThan, -1);
                            break;
                        default:
                            DFG_CRASH(m_graph, compare, "Invalid comparison node type");
                            break;
                        }
                        break;
                    }

                    default:
                        break;
                    }
                }

                if (invert)
                    relationshipForTrue = relationshipForTrue.inverse();

                if (relationshipForTrue) {
                    RelationshipMap forTrue = m_relationships;
                    RelationshipMap forFalse = m_relationships;

                    if (verbose)
                        dataLog("Dealing with true:\n");
                    setRelationship(forTrue, relationshipForTrue);
                    if (Relationship relationshipForFalse = relationshipForTrue.inverse()) {
                        if (verbose)
                            dataLog("Dealing with false:\n");
                        setRelationship(forFalse, relationshipForFalse);
                    }

                    changed |= mergeTo(forTrue, branchData->taken.block);
                    changed |= mergeTo(forFalse, branchData->notTaken.block);
                    alreadyMerged = true;
                }
            }

            if (!alreadyMerged) {
                for (BasicBlock* successor : block->successors())
                    changed |= mergeTo(m_relationships, successor);
            }
        }
    }

    // Now we transform the code based on the results computed in the previous loop.
    changed = false;
    for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
        m_relationships = m_relationshipsAtHead[block];
        for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
            Node* node = block->at(nodeIndex);
            if (verbose)
                dataLog("Transformation: at ", node, ": ", listDump(sortedRelationships()), "\n");

            // This ends up being pretty awkward to write because we need to decide if we
            // optimize by using the relationships before the operation, but we need to
            // call executeNode() before we optimize.
            switch (node->op()) {
            case ArithAdd: {
                // Remove the overflow check when the known range of the addend proves the
                // sum cannot overflow.
                if (!node->isBinaryUseKind(Int32Use))
                    break;
                if (node->arithMode() != Arith::CheckOverflow)
                    break;
                if (!node->child2()->isInt32Constant())
                    break;

                auto iter = m_relationships.find(node->child1().node());
                if (iter == m_relationships.end())
                    break;

                int minValue = std::numeric_limits<int>::min();
                int maxValue = std::numeric_limits<int>::max();
                for (Relationship relationship : iter->value) {
                    minValue = std::max(minValue, relationship.minValueOfLeft());
                    maxValue = std::min(maxValue, relationship.maxValueOfLeft());
                }

                if (sumOverflows<int>(minValue, node->child2()->asInt32()) ||
                    sumOverflows<int>(maxValue, node->child2()->asInt32()))
                    break;

                executeNode(block->at(nodeIndex));
                node->setArithMode(Arith::Unchecked);
                changed = true;
                break;
            }

            case CheckInBounds: {
                // Remove the bounds check when the index is provably >= 0 and < length.
                auto iter = m_relationships.find(node->child1().node());
                if (iter == m_relationships.end())
                    break;

                bool nonNegative = false;
                bool lessThanLength = false;
                for (Relationship relationship : iter->value) {
                    if (relationship.minValueOfLeft() >= 0)
                        nonNegative = true;

                    if (relationship.right() == node->child2()) {
                        if (relationship.kind() == Relationship::Equal
                            && relationship.offset() < 0)
                            lessThanLength = true;

                        if (relationship.kind() == Relationship::LessThan
                            && relationship.offset() <= 0)
                            lessThanLength = true;
                    }
                }

                if (nonNegative && lessThanLength) {
                    executeNode(block->at(nodeIndex));
                    node->remove();
                    changed = true;
                }
                break;
            }

            case GetByVal: {
                // An Undecided-array load with a provably non-negative index always
                // yields undefined.
                if (node->arrayMode().type() != Array::Undecided)
                    break;

                auto iter = m_relationships.find(node->child2().node());
                if (iter == m_relationships.end())
                    break;

                int minValue = std::numeric_limits<int>::min();
                for (Relationship relationship : iter->value)
                    minValue = std::max(minValue, relationship.minValueOfLeft());

                if (minValue < 0)
                    break;

                executeNode(block->at(nodeIndex));
                m_graph.convertToConstant(node, jsUndefined());
                changed = true;
                break;
            }

            default:
                break;
            }

            executeNode(block->at(nodeIndex));
        }
    }

    return changed;
}
-
-private:
- void executeNode(Node* node)
- {
- switch (node->op()) {
- case CheckInBounds: {
- setRelationship(Relationship::safeCreate(node->child1().node(), node->child2().node(), Relationship::LessThan));
- setRelationship(Relationship::safeCreate(node->child1().node(), m_zero, Relationship::GreaterThan, -1));
- break;
- }
-
- case ArithAdd: {
- // We're only interested in int32 additions and we currently only know how to
- // handle the non-wrapping ones.
- if (!node->isBinaryUseKind(Int32Use))
- break;
-
- // FIXME: We could handle the unchecked arithmetic case. We just do it don't right
- // now.
- if (node->arithMode() != Arith::CheckOverflow)
- break;
-
- // Handle add: @value + constant.
- if (!node->child2()->isInt32Constant())
- break;
-
- int offset = node->child2()->asInt32();
-
- // We add a relationship for @add == @value + constant, and then we copy the
- // relationships for @value. This gives us a one-deep view of @value's existing
- // relationships, which matches the one-deep search in setRelationship().
-
- setRelationship(
- Relationship(node, node->child1().node(), Relationship::Equal, offset));
-
- auto iter = m_relationships.find(node->child1().node());
- if (iter != m_relationships.end()) {
- Vector<Relationship> toAdd;
- for (Relationship relationship : iter->value) {
- // We have:
- // add: ArithAdd(@x, C)
- // @x op @y + D
- //
- // The following certainly holds:
- // @x == @add - C
- //
- // Which allows us to substitute:
- // @add - C op @y + D
- //
- // And then carry the C over:
- // @add op @y + D + C
-
- Relationship newRelationship = relationship;
- ASSERT(newRelationship.left() == node->child1().node());
-
- if (newRelationship.right() == node)
- continue;
- newRelationship.setLeft(node);
- if (newRelationship.addToOffset(offset))
- toAdd.append(newRelationship);
- }
- for (Relationship relationship : toAdd)
- setRelationship(relationship, 0);
- }
-
- // Now we want to establish that both the input and the output of the addition are
- // within a particular range of integers.
-
- if (offset > 0) {
- // If we have "add: @value + 1" then we know that @value <= max - 1, i.e. that
- // @value < max.
- if (!sumOverflows<int>(std::numeric_limits<int>::max(), -offset, 1)) {
- setRelationship(
- Relationship::safeCreate(
- node->child1().node(), m_zero, Relationship::LessThan,
- std::numeric_limits<int>::max() - offset + 1),
- 0);
- }
-
- // If we have "add: @value + 1" then we know that @add >= min + 1, i.e. that
- // @add > min.
- if (!sumOverflows<int>(std::numeric_limits<int>::min(), offset, -1)) {
- setRelationship(
- Relationship(
- node, m_zero, Relationship::GreaterThan,
- std::numeric_limits<int>::min() + offset - 1),
- 0);
- }
- }
-
- if (offset < 0 && offset != std::numeric_limits<int>::min()) {
- // If we have "add: @value - 1" then we know that @value >= min + 1, i.e. that
- // @value > min.
- if (!sumOverflows<int>(std::numeric_limits<int>::min(), offset, -1)) {
- setRelationship(
- Relationship::safeCreate(
- node->child1().node(), m_zero, Relationship::GreaterThan,
- std::numeric_limits<int>::min() + offset - 1),
- 0);
- }
-
- // If we have "add: @value + 1" then we know that @add <= max - 1, i.e. that
- // @add < max.
- if (!sumOverflows<int>(std::numeric_limits<int>::max(), -offset, 1)) {
- setRelationship(
- Relationship(
- node, m_zero, Relationship::LessThan,
- std::numeric_limits<int>::max() - offset + 1),
- 0);
- }
- }
- break;
- }
-
- case GetArrayLength: {
- setRelationship(Relationship(node, m_zero, Relationship::GreaterThan, -1));
- break;
- }
-
- case Upsilon: {
- setRelationship(
- Relationship::safeCreate(
- node->child1().node(), node->phi(), Relationship::Equal, 0));
-
- auto iter = m_relationships.find(node->child1().node());
- if (iter != m_relationships.end()) {
- Vector<Relationship> toAdd;
- for (Relationship relationship : iter->value) {
- Relationship newRelationship = relationship;
- if (node->phi() == newRelationship.right())
- continue;
- newRelationship.setLeft(node->phi());
- toAdd.append(newRelationship);
- }
- for (Relationship relationship : toAdd)
- setRelationship(relationship);
- }
- break;
- }
-
- default:
- break;
- }
- }
-
- void setRelationship(Relationship relationship, unsigned timeToLive = 1)
- {
- setRelationship(m_relationships, relationship, timeToLive);
- }
-
- void setRelationship(
- RelationshipMap& relationshipMap, Relationship relationship, unsigned timeToLive = 1)
- {
- setOneSide(relationshipMap, relationship, timeToLive);
- setOneSide(relationshipMap, relationship.flipped(), timeToLive);
- }
-
- void setOneSide(
- RelationshipMap& relationshipMap, Relationship relationship, unsigned timeToLive = 1)
- {
- if (!relationship)
- return;
-
- if (verbose)
- dataLog(" Setting: ", relationship, " (ttl = ", timeToLive, ")\n");
-
- auto result = relationshipMap.add(
- relationship.left(), Vector<Relationship>());
- Vector<Relationship>& relationships = result.iterator->value;
-
- if (relationship.right()->isInt32Constant()) {
- // We want to do some work to refine relationships over constants. This is necessary because
- // when we introduce a constant into the IR, we don't automatically create relationships
- // between that constant and the other constants. That means that when we do introduce
- // relationships between a non-constant and a constant, we need to check the other
- // relationships between that non-constant and other constants to see if we can make some
- // refinements. Possible constant statement filtrations:
- //
- // - @x == @c and @x != @d, where @c > @d:
- // @x == @c and @x > @d
- //
- // but actually we are more aggressive:
- //
- // - @x == @c and @x op @d where @c == @d + k
- // @x == @c and @x == @d + k
- //
- // And this is also possible:
- //
- // - @x > @c and @x != @d where @c == @d + k and k >= 0
- //
- // @x > @c and @x > @d + k
- //
- // So, here's what we should do depending on the kind of relationship we're introducing:
- //
- // Equal constant: Find all LessThan, NotEqual, and GreaterThan constant operations and refine
- // them to be Equal constant. Don't worry about contradictions.
- //
- // LessThan, GreaterThan constant: See if there is any Equal constant, and if so, refine to
- // that. Otherwise, find all NotEqual constant operations and refine them to be LessThan or
- // GreaterThan constant if possible.
- //
- // NotEqual constant: See if there is any Equal constant, and if so, refine to that. Otherwise,
- // see if there is any LessThan or GreaterThan constant operation, and if so, attempt to
- // refine to that.
- //
- // Seems that the key thing is to have a filterConstant() operation that returns a refined
- // version of *this based on other. The code here accomplishes this by using the vagueness
- // index (Relationship::vagueness()) to first find less vague relationships and refine this one
- // using them, and then find more vague relationships and refine those to this.
-
- if (relationship.vagueness() != Relationship::minVagueness) {
- // We're not minimally vague (maximally specific), so try to refine ourselves based on what
- // we already know.
- for (Relationship& otherRelationship : relationships) {
- if (otherRelationship.vagueness() < relationship.vagueness()
- && otherRelationship.right()->isInt32Constant()) {
- Relationship newRelationship = relationship.filterConstant(otherRelationship);
- if (verbose && newRelationship != relationship)
- dataLog(" Refined to: ", newRelationship, " based on ", otherRelationship, "\n");
- relationship = newRelationship;
- }
- }
- }
-
- if (relationship.vagueness() != Relationship::maxVagueness) {
- // We're not maximally value (minimally specific), so try to refine other relationships
- // based on this one.
- for (Relationship& otherRelationship : relationships) {
- if (otherRelationship.vagueness() > relationship.vagueness()
- && otherRelationship.right()->isInt32Constant()) {
- Relationship newRelationship = otherRelationship.filterConstant(relationship);
- if (verbose && newRelationship != otherRelationship)
- dataLog(" Refined ", otherRelationship, " to: ", newRelationship, "\n");
- otherRelationship = newRelationship;
- }
- }
- }
- }
-
- Vector<Relationship> toAdd;
- bool found = false;
- for (Relationship& otherRelationship : relationships) {
- if (otherRelationship.sameNodesAs(relationship)) {
- if (Relationship filtered = otherRelationship.filter(relationship)) {
- ASSERT(filtered.left() == relationship.left());
- otherRelationship = filtered;
- found = true;
- }
- }
-
- // FIXME: Also add filtration over statements about constants. For example, if we have
- // @x == @c and @x != @d, where @d > @c, then we want to turn @x != @d into @x < @d.
-
- if (timeToLive && otherRelationship.kind() == Relationship::Equal) {
- if (verbose)
- dataLog(" Considering: ", otherRelationship, "\n");
-
- // We have:
- // @a op @b + C
- // @a == @c + D
- //
- // This implies:
- // @c + D op @b + C
- // @c op @b + C - D
- //
- // Where: @a == relationship.left(), @b == relationship.right(),
- // @a == otherRelationship.left(), @c == otherRelationship.right().
-
- if (otherRelationship.offset() != std::numeric_limits<int>::min()) {
- Relationship newRelationship = relationship;
- if (newRelationship.right() != otherRelationship.right()) {
- newRelationship.setLeft(otherRelationship.right());
- if (newRelationship.addToOffset(-otherRelationship.offset()))
- toAdd.append(newRelationship);
- }
- }
- }
- }
-
- if (!found)
- relationships.append(relationship);
-
- for (Relationship anotherRelationship : toAdd) {
- ASSERT(timeToLive);
- setOneSide(relationshipMap, anotherRelationship, timeToLive - 1);
- }
- }
-
- bool mergeTo(RelationshipMap& relationshipMap, BasicBlock* target)
- {
- if (verbose) {
- dataLog("Merging to ", pointerDump(target), ":\n");
- dataLog(" Incoming: ", listDump(sortedRelationships(relationshipMap)), "\n");
- dataLog(" At head: ", listDump(sortedRelationships(m_relationshipsAtHead[target])), "\n");
- }
-
- if (m_seenBlocks.add(target)) {
- // This is a new block. We copy subject to liveness pruning.
- auto isLive = [&] (Node* node) {
- if (node == m_zero)
- return true;
- return target->ssa->liveAtHead.contains(node);
- };
-
- for (auto& entry : relationshipMap) {
- if (!isLive(entry.key))
- continue;
-
- Vector<Relationship> values;
- for (Relationship relationship : entry.value) {
- ASSERT(relationship.left() == entry.key);
- if (isLive(relationship.right())) {
- if (verbose)
- dataLog(" Propagating ", relationship, "\n");
- values.append(relationship);
- }
- }
-
- std::sort(values.begin(), values.end());
- m_relationshipsAtHead[target].add(entry.key, values);
- }
- return true;
- }
-
- // Merge by intersecting. We have no notion of BOTTOM, so we use the omission of
- // relationships for a pair of nodes to mean TOP. The reason why we don't need BOTTOM
- // is (1) we just overapproximate contradictions and (2) a value never having been
- // assigned would only happen if we have not processed the node's predecessor. We
- // shouldn't process blocks until we have processed the block's predecessor because we
- // are using reverse postorder.
- Vector<Node*> toRemove;
- bool changed = false;
- for (auto& entry : m_relationshipsAtHead[target]) {
- auto iter = relationshipMap.find(entry.key);
- if (iter == relationshipMap.end()) {
- toRemove.append(entry.key);
- changed = true;
- continue;
- }
-
- Vector<Relationship> mergedRelationships;
- for (Relationship targetRelationship : entry.value) {
- for (Relationship sourceRelationship : iter->value) {
- if (verbose)
- dataLog(" Merging ", targetRelationship, " and ", sourceRelationship, ":\n");
- targetRelationship.merge(
- sourceRelationship,
- [&] (Relationship newRelationship) {
- if (verbose)
- dataLog(" Got ", newRelationship, "\n");
-
- // We need to filter() to avoid exponential explosion of identical
- // relationships. We do this here to avoid making setOneSide() do
- // more work, since we expect setOneSide() will be called more
- // frequently. Here's an example. At some point someone might start
- // with two relationships like @a > @b - C and @a < @b + D. Then
- // someone does a setRelationship() passing something that turns
- // both of these into @a == @b. Now we have @a == @b duplicated.
- // Let's say that this duplicate @a == @b ends up at the head of a
- // loop. If we didn't have this rule, then the loop would propagate
- // duplicate @a == @b's onto the existing duplicate @a == @b's.
- // There would be four pairs of @a == @b, each of which would
- // create a new @a == @b. Now we'd have four of these duplicates
- // and the next time around we'd have 8, then 16, etc. We avoid
- // this here by doing this filtration. That might be a bit of
- // overkill, since it's probably just the identical duplicate
- // relationship case we want' to avoid. But, I'll keep this until
- // we have evidence that this is a performance problem. Remember -
- // we are already dealing with a list that is pruned down to
- // relationships with identical left operand. It shouldn't be a
- // large list.
- bool found = false;
- for (Relationship& existingRelationship : mergedRelationships) {
- if (existingRelationship.sameNodesAs(newRelationship)) {
- Relationship filtered =
- existingRelationship.filter(newRelationship);
- if (filtered) {
- existingRelationship = filtered;
- found = true;
- break;
- }
- }
- }
-
- if (!found)
- mergedRelationships.append(newRelationship);
- });
- }
- }
- std::sort(mergedRelationships.begin(), mergedRelationships.end());
- if (entry.value == mergedRelationships)
- continue;
-
- entry.value = mergedRelationships;
- changed = true;
- }
- for (Node* node : toRemove)
- m_relationshipsAtHead[target].remove(node);
-
- return changed;
- }
-
- Vector<Relationship> sortedRelationships(const RelationshipMap& relationships)
- {
- Vector<Relationship> result;
- for (auto& entry : relationships)
- result.appendVector(entry.value);
- std::sort(result.begin(), result.end());
- return result;
- }
-
- Vector<Relationship> sortedRelationships()
- {
- return sortedRelationships(m_relationships);
- }
-
- Node* m_zero;
- RelationshipMap m_relationships;
- BlockSet m_seenBlocks;
- BlockMap<RelationshipMap> m_relationshipsAtHead;
- InsertionSet m_insertionSet;
-};
-
-} // anonymous namespace
-
-bool performIntegerRangeOptimization(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Integer Range Optimization Phase");
- return runPhase<IntegerRangeOptimizationPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.h b/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.h
deleted file mode 100644
index fe0615e39..000000000
--- a/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGIntegerRangeOptimizationPhase_h
-#define DFGIntegerRangeOptimizationPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Removes overflow checks and out-of-bounds checks by doing a forward flow analysis to prove
-// inequalities. It will remove the overflow and bounds checks in loops like:
-//
-// for (var i = 0; i < array.length; ++i) array[i];
-
-bool performIntegerRangeOptimization(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGIntegerRangeOptimizationPhase_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp
index 9228001cc..d71a7cbda 100644
--- a/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,12 +28,11 @@
#if ENABLE(DFG_JIT)
-#include "DFGBlockSetInlines.h"
#include "DFGClobberize.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -51,7 +50,7 @@ public:
{
ASSERT(m_graph.m_form != SSA);
- BlockSet blocksThatNeedInvalidationPoints;
+ BitVector blocksThatNeedInvalidationPoints;
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* block = m_graph.block(blockIndex);
@@ -64,13 +63,17 @@ public:
// Note: this assumes that control flow occurs at bytecode instruction boundaries.
if (m_originThatHadFire.isSet()) {
for (unsigned i = block->numSuccessors(); i--;)
- blocksThatNeedInvalidationPoints.add(block->successor(i));
+ blocksThatNeedInvalidationPoints.set(block->successor(i)->index);
}
m_insertionSet.execute(block);
}
-
- for (BasicBlock* block : blocksThatNeedInvalidationPoints.iterable(m_graph)) {
+
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ if (!blocksThatNeedInvalidationPoints.get(blockIndex))
+ continue;
+
+ BasicBlock* block = m_graph.block(blockIndex);
insertInvalidationCheck(0, block->at(0));
m_insertionSet.execute(block);
}
@@ -81,18 +84,18 @@ public:
private:
void handle(unsigned nodeIndex, Node* node)
{
- if (m_originThatHadFire.isSet() && m_originThatHadFire != node->origin.forExit) {
+ if (m_originThatHadFire.isSet() && m_originThatHadFire != node->codeOrigin) {
insertInvalidationCheck(nodeIndex, node);
m_originThatHadFire = CodeOrigin();
}
if (writesOverlap(m_graph, node, Watchpoint_fire))
- m_originThatHadFire = node->origin.forExit;
+ m_originThatHadFire = node->codeOrigin;
}
void insertInvalidationCheck(unsigned nodeIndex, Node* node)
{
- m_insertionSet.insertNode(nodeIndex, SpecNone, InvalidationPoint, node->origin);
+ m_insertionSet.insertNode(nodeIndex, SpecNone, InvalidationPoint, node->codeOrigin);
}
CodeOrigin m_originThatHadFire;
diff --git a/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h b/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h
index 4c49cc96a..a135fdc3a 100644
--- a/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGInvalidationPointInjectionPhase_h
#define DFGInvalidationPointInjectionPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGJITCode.cpp b/Source/JavaScriptCore/dfg/DFGJITCode.cpp
index db044e53e..c53653f8f 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,17 +29,11 @@
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
-#include "JSCInlines.h"
-#include "TrackedReferences.h"
namespace JSC { namespace DFG {
JITCode::JITCode()
: DirectJITCode(DFGJIT)
-#if ENABLE(FTL_JIT)
- , osrEntryRetry(0)
- , abandonOSREntry(false)
-#endif // ENABLE(FTL_JIT)
{
}
@@ -83,8 +77,23 @@ void JITCode::reconstruct(
reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);
result = Operands<JSValue>(OperandsLike, recoveries);
- for (size_t i = result.size(); i--;)
+ for (size_t i = result.size(); i--;) {
+ int operand = result.operandForIndex(i);
+
+ if (operandIsArgument(operand)
+ && !VirtualRegister(operand).toArgument()
+ && codeBlock->codeType() == FunctionCode
+ && codeBlock->specializationKind() == CodeForConstruct) {
+ // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
+ // also never be used. It doesn't matter what we put into the value for this,
+ // but it has to be an actual value that can be grokked by subsequent DFG passes,
+ // so we sanitize it here by turning it into Undefined.
+ result[i] = jsUndefined();
+ continue;
+ }
+
result[i] = recoveries[i].recover(exec);
+ }
}
#if ENABLE(FTL_JIT)
@@ -147,7 +156,6 @@ void JITCode::setOptimizationThresholdBasedOnCompilationResult(
switch (result) {
case CompilationSuccessful:
optimizeNextInvocation(codeBlock);
- codeBlock->baselineVersion()->m_hasBeenCompiledWithFTL = true;
return;
case CompilationFailed:
dontOptimizeAnytimeSoon(codeBlock);
@@ -171,18 +179,6 @@ void JITCode::setOptimizationThresholdBasedOnCompilationResult(
}
#endif // ENABLE(FTL_JIT)
-void JITCode::validateReferences(const TrackedReferences& trackedReferences)
-{
- common.validateReferences(trackedReferences);
-
- for (OSREntryData& entry : osrEntry) {
- for (unsigned i = entry.m_expectedValues.size(); i--;)
- entry.m_expectedValues[i].validateReferences(trackedReferences);
- }
-
- minifiedDFG.validateReferences(trackedReferences);
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGJITCode.h b/Source/JavaScriptCore/dfg/DFGJITCode.h
index 266a7cece..0e771e046 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCode.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGJITCode_h
#define DFGJITCode_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "CompilationResult.h"
@@ -38,11 +40,7 @@
#include "JITCode.h"
#include <wtf/SegmentedVector.h>
-namespace JSC {
-
-class TrackedReferences;
-
-namespace DFG {
+namespace JSC { namespace DFG {
class JITCompiler;
@@ -111,8 +109,6 @@ public:
void setOptimizationThresholdBasedOnCompilationResult(CodeBlock*, CompilationResult);
#endif // ENABLE(FTL_JIT)
- void validateReferences(const TrackedReferences&) override;
-
void shrinkToFit();
private:
@@ -126,11 +122,8 @@ public:
DFG::VariableEventStream variableEventStream;
DFG::MinifiedGraph minifiedDFG;
#if ENABLE(FTL_JIT)
- uint8_t nestedTriggerIsSet { 0 };
- UpperTierExecutionCounter tierUpCounter;
+ ExecutionCounter tierUpCounter;
RefPtr<CodeBlock> osrEntryBlock;
- unsigned osrEntryRetry;
- bool abandonOSREntry;
#endif // ENABLE(FTL_JIT)
};
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index b58d67e1f..2934d2ba9 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,7 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
@@ -41,10 +40,8 @@
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
-#include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
#include "VM.h"
+#include "LinkBuffer.h"
namespace JSC { namespace DFG {
@@ -55,7 +52,7 @@ JITCompiler::JITCompiler(Graph& dfg)
, m_blockHeads(dfg.numBlocks())
{
if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
- m_disassembler = std::make_unique<Disassembler>(dfg);
+ m_disassembler = adoptPtr(new Disassembler(dfg));
}
JITCompiler::~JITCompiler()
@@ -95,14 +92,15 @@ void JITCompiler::linkOSRExits()
void JITCompiler::compileEntry()
{
// This code currently matches the old JIT. In the function header we need to
- // save return address and call frame via the prologue and perform a fast stack check.
+ // pop the return address (since we do not allow any recursion on the machine
+ // stack), and perform a fast stack check.
// FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
// We'll need to convert the remaining cti_ style calls (specifically the stack
// check) which will be dependent on stack layout. (We'd need to account for this in
// both normal return code and when jumping to an exception handler).
- emitFunctionPrologue();
+ preserveReturnAddressAfterCall(GPRInfo::regT2);
+ emitPutReturnPCToCallFrameHeader(GPRInfo::regT2);
emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
- jitAssertTagsInPlace();
}
void JITCompiler::compileBody()
@@ -116,40 +114,32 @@ void JITCompiler::compileBody()
void JITCompiler::compileExceptionHandlers()
{
- if (!m_exceptionChecksWithCallFrameRollback.empty()) {
- m_exceptionChecksWithCallFrameRollback.link(this);
-
- // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
+ return;
-#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
-#endif
- m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
+ Jump doLookup;
- jumpToExceptionHandler();
+ if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+ m_exceptionChecksWithCallFrameRollback.link(this);
+ emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
+ doLookup = jump();
}
- if (!m_exceptionChecks.empty()) {
+ if (!m_exceptionChecks.empty())
m_exceptionChecks.link(this);
- // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+
+ if (doLookup.isSet())
+ doLookup.link(this);
#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
#endif
- m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
-
- jumpToExceptionHandler();
- }
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
+ jumpToExceptionHandler();
}
void JITCompiler::link(LinkBuffer& linkBuffer)
@@ -158,18 +148,15 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
- if (!m_graph.m_plan.inlineCallFrames->isEmpty())
- m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
-
-#if USE(JSVALUE32_64)
- m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
-#endif
+ if (!m_graph.m_inlineCallFrames->isEmpty())
+ m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release();
- m_graph.registerFrozenValues();
+ m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
+ m_jitCode->common.slowArguments = std::move(m_graph.m_slowArguments);
BitVector usedJumpTables;
- for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
- SwitchData& data = **iter;
+ for (unsigned i = m_graph.m_switchData.size(); i--;) {
+ SwitchData& data = m_graph.m_switchData[i];
if (!data.didUseJumpTable)
continue;
@@ -180,14 +167,14 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
usedJumpTables.set(data.switchTableIndex);
SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
- table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
table.ctiOffsets.grow(table.branchOffsets.size());
for (unsigned j = table.ctiOffsets.size(); j--;)
table.ctiOffsets[j] = table.ctiDefault;
for (unsigned j = data.cases.size(); j--;) {
SwitchCase& myCase = data.cases[j];
- table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
- linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
+ linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
}
}
@@ -201,8 +188,8 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
// NOTE: we cannot clear string switch tables because (1) we're running concurrently
// and we cannot deref StringImpl's and (2) it would be weird to deref those
// StringImpl's since we refer to them.
- for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
- SwitchData& data = **switchDataIter;
+ for (unsigned i = m_graph.m_switchData.size(); i--;) {
+ SwitchData& data = m_graph.m_switchData[i];
if (!data.didUseJumpTable)
continue;
@@ -210,7 +197,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
continue;
StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
- table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
StringJumpTable::StringOffsetTable::iterator iter;
StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
for (iter = table.offsetTable.begin(); iter != end; ++iter)
@@ -219,7 +206,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
SwitchCase& myCase = data.cases[j];
iter = table.offsetTable.find(myCase.value.stringImpl());
RELEASE_ASSERT(iter != end);
- iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
}
}
@@ -241,13 +228,17 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
}
+ m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
- JSCallRecord& record = m_jsCalls[i];
- CallLinkInfo& info = *record.m_info;
- linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
- info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
- linkBuffer.locationOf(record.m_targetToCheck),
- linkBuffer.locationOfNearCall(record.m_fastCall));
+ CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+ info.callType = m_jsCalls[i].m_callType;
+ info.isDFG = true;
+ info.codeOrigin = m_jsCalls[i].m_codeOrigin;
+ linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
+ info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
+ info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
+ info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
+ info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
}
MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
@@ -286,30 +277,10 @@ void JITCompiler::compile()
setStartOfCode();
compileEntry();
- m_speculative = std::make_unique<SpeculativeJIT>(*this);
-
- // Plant a check that sufficient space is available in the JSStack.
- addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
- Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
-
- addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ m_speculative = adoptPtr(new SpeculativeJIT(*this));
compileBody();
setEndOfMainPath();
- // === Footer code generation ===
- //
- // Generate the stack overflow handling; if the stack check in the entry head fails,
- // we need to call out to a helper function to throw the StackOverflowError.
- stackOverflow.link(this);
-
- emitStoreCodeOrigin(CodeOrigin(0));
-
- if (maxFrameExtentForSlowPathCall)
- addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
-
- m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
-
// Generate slow path code.
m_speculative->runSlowPathGenerators();
@@ -319,10 +290,13 @@ void JITCompiler::compile()
// Create OSR entry trampolines if necessary.
m_speculative->createOSREntries();
setEndOfCode();
+}
- auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+void JITCompiler::link()
+{
+ OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
if (linkBuffer->didFailToAllocate()) {
- m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
return;
}
@@ -334,8 +308,8 @@ void JITCompiler::compile()
disassemble(*linkBuffer);
- m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
- m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer));
+ m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
+ m_graph.m_plan, m_jitCode.release(), linkBuffer.release()));
}
void JITCompiler::compileFunction()
@@ -351,33 +325,30 @@ void JITCompiler::compileFunction()
// so enter after this.
Label fromArityCheck(this);
// Plant a check that sufficient space is available in the JSStack.
- addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
- Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit()).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), GPRInfo::regT1);
+ // Return here after stack check.
+ Label fromStackCheck = label();
- // Move the stack pointer down to accommodate locals
- addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
// === Function body code generation ===
- m_speculative = std::make_unique<SpeculativeJIT>(*this);
+ m_speculative = adoptPtr(new SpeculativeJIT(*this));
compileBody();
setEndOfMainPath();
// === Function footer code generation ===
//
- // Generate code to perform the stack overflow handling (if the stack check in
+ // Generate code to perform the slow stack check (if the fast one in
// the function header fails), and generate the entry point with arity check.
//
- // Generate the stack overflow handling; if the stack check in the function head fails,
- // we need to call out to a helper function to throw the StackOverflowError.
- stackOverflow.link(this);
+ // Generate the stack check; if the fast check in the function head fails,
+ // we need to call out to a helper function to check whether more space is available.
+ // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+ stackCheck.link(this);
emitStoreCodeOrigin(CodeOrigin(0));
-
- if (maxFrameExtentForSlowPathCall)
- addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
-
- m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
+ jump(fromStackCheck);
// The fast entry point into a function does not check the correct number of arguments
// have been passed to the call (we only use the fast entry point where we can statically
@@ -390,23 +361,9 @@ void JITCompiler::compileFunction()
load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
emitStoreCodeOrigin(CodeOrigin(0));
- if (maxFrameExtentForSlowPathCall)
- addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
- if (maxFrameExtentForSlowPathCall)
- addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
emitStoreCodeOrigin(CodeOrigin(0));
- GPRReg thunkReg;
-#if USE(JSVALUE64)
- thunkReg = GPRInfo::regT7;
-#else
- thunkReg = GPRInfo::regT5;
-#endif
- CodeLocationLabel* arityThunkLabels =
- m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
- move(TrustedImmPtr(arityThunkLabels), thunkReg);
- loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
m_callArityFixup = call();
jump(fromArityCheck);
@@ -419,11 +376,14 @@ void JITCompiler::compileFunction()
// Create OSR entry trampolines if necessary.
m_speculative->createOSREntries();
setEndOfCode();
+}
+void JITCompiler::linkFunction()
+{
// === Link ===
- auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+ OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
if (linkBuffer->didFailToAllocate()) {
- m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
return;
}
link(*linkBuffer);
@@ -432,94 +392,25 @@ void JITCompiler::compileFunction()
m_jitCode->shrinkToFit();
codeBlock()->shrinkToFit(CodeBlock::LateShrink);
- linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
+ linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));
disassemble(*linkBuffer);
MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
- m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
- m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer), withArityCheck);
+ m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
+ m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), withArityCheck));
}
void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
- if (shouldShowDisassembly()) {
+ if (shouldShowDisassembly())
m_disassembler->dump(linkBuffer);
- linkBuffer.didAlreadyDisassemble();
- }
if (m_graph.m_plan.compilation)
m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}
-#if USE(JSVALUE32_64)
-void* JITCompiler::addressOfDoubleConstant(Node* node)
-{
- double value = node->asNumber();
- int64_t valueBits = bitwise_cast<int64_t>(value);
- auto it = m_graph.m_doubleConstantsMap.find(valueBits);
- if (it != m_graph.m_doubleConstantsMap.end())
- return it->second;
-
- if (!m_graph.m_doubleConstants)
- m_graph.m_doubleConstants = std::make_unique<Bag<double>>();
-
- double* addressInConstantPool = m_graph.m_doubleConstants->add();
- *addressInConstantPool = value;
- m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
- return addressInConstantPool;
-}
-#endif
-
-void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
-{
- // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
- if (!basicBlock.intersectionOfCFAHasVisited)
- return;
-
- OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
-
- entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;
-
- // Fix the expected values: in our protocol, a dead variable will have an expected
- // value of (None, []). But the old JIT may stash some values there. So we really
- // need (Top, TOP).
- for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
- Node* node = basicBlock.variablesAtHead.argument(argument);
- if (!node || !node->shouldGenerate())
- entry->m_expectedValues.argument(argument).makeHeapTop();
- }
- for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
- Node* node = basicBlock.variablesAtHead.local(local);
- if (!node || !node->shouldGenerate())
- entry->m_expectedValues.local(local).makeHeapTop();
- else {
- VariableAccessData* variable = node->variableAccessData();
- entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
-
- switch (variable->flushFormat()) {
- case FlushedDouble:
- entry->m_localsForcedDouble.set(local);
- break;
- case FlushedInt52:
- entry->m_localsForcedMachineInt.set(local);
- break;
- default:
- break;
- }
-
- if (variable->local() != variable->machineLocal()) {
- entry->m_reshufflings.append(
- OSREntryReshuffling(
- variable->local().offset(), variable->machineLocal().offset()));
- }
- }
- }
-
- entry->m_reshufflings.shrinkToFit();
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index 9710ebaf4..b582cea4f 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,6 +29,7 @@
#if ENABLE(DFG_JIT)
#include "CCallHelpers.h"
+#include "CallFrameInlines.h"
#include "CodeBlock.h"
#include "DFGDisassembler.h"
#include "DFGGraph.h"
@@ -111,6 +112,9 @@ public:
void compile();
void compileFunction();
+ void link();
+ void linkFunction();
+
// Accessors for properties.
Graph& graph() { return m_graph; }
@@ -183,7 +187,6 @@ public:
// Add a call out from JIT code, with a fast exception check that tests if the return value is zero.
void fastExceptionCheck()
{
- callExceptionFuzz();
m_exceptionChecks.append(branchTestPtr(Zero, GPRInfo::returnValueGPR));
}
@@ -196,7 +199,12 @@ public:
}
#if USE(JSVALUE32_64)
- void* addressOfDoubleConstant(Node*);
+ void* addressOfDoubleConstant(Node* node)
+ {
+ ASSERT(m_graph.isNumberConstant(node));
+ unsigned constantIndex = node->constantNumber();
+ return &(codeBlock()->constantRegister(FirstConstantRegisterIndex + constantIndex));
+ }
#endif
void addGetById(const JITGetByIdGenerator& gen, SlowPathGenerator* slowPath)
@@ -213,15 +221,10 @@ public:
{
m_ins.append(record);
}
-
- unsigned currentJSCallIndex() const
- {
- return m_jsCalls.size();
- }
- void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info)
+ void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo::CallType callType, GPRReg callee, CodeOrigin codeOrigin)
{
- m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, info));
+ m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, callType, callee, codeOrigin));
}
void addWeakReference(JSCell* target)
@@ -242,32 +245,54 @@ public:
addWeakReference(weakPtr);
return result;
}
-
- template<typename T>
- Jump branchWeakStructure(RelationalCondition cond, T left, Structure* weakStructure)
- {
-#if USE(JSVALUE64)
- Jump result = branch32(cond, left, TrustedImm32(weakStructure->id()));
- addWeakReference(weakStructure);
- return result;
-#else
- return branchWeakPtr(cond, left, weakStructure);
-#endif
- }
-
- template<typename T>
- Jump branchStructurePtr(RelationalCondition cond, T left, Structure* structure)
+
+ void noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
-#if USE(JSVALUE64)
- return branch32(cond, left, TrustedImm32(structure->id()));
-#else
- return branchPtr(cond, left, TrustedImmPtr(structure));
-#endif
+ // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
+ if (!basicBlock.cfaHasVisited)
+ return;
+
+ OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
+
+ entry->m_expectedValues = basicBlock.valuesAtHead;
+
+ // Fix the expected values: in our protocol, a dead variable will have an expected
+ // value of (None, []). But the old JIT may stash some values there. So we really
+ // need (Top, TOP).
+ for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
+ Node* node = basicBlock.variablesAtHead.argument(argument);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.argument(argument).makeHeapTop();
+ }
+ for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
+ Node* node = basicBlock.variablesAtHead.local(local);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.local(local).makeHeapTop();
+ else {
+ VariableAccessData* variable = node->variableAccessData();
+ switch (variable->flushFormat()) {
+ case FlushedDouble:
+ entry->m_localsForcedDouble.set(local);
+ break;
+ case FlushedInt52:
+ entry->m_localsForcedMachineInt.set(local);
+ break;
+ default:
+ break;
+ }
+
+ if (variable->local() != variable->machineLocal()) {
+ entry->m_reshufflings.append(
+ OSREntryReshuffling(
+ variable->local().offset(), variable->machineLocal().offset()));
+ }
+ }
+ }
+
+ entry->m_reshufflings.shrinkToFit();
}
-
- void noticeOSREntry(BasicBlock&, JITCompiler::Label blockHead, LinkBuffer&);
- RefPtr<JITCode> jitCode() { return m_jitCode; }
+ PassRefPtr<JITCode> jitCode() { return m_jitCode; }
Vector<Label>& blockHeads() { return m_blockHeads; }
@@ -287,7 +312,7 @@ private:
// The dataflow graph currently being generated.
Graph& m_graph;
- std::unique_ptr<Disassembler> m_disassembler;
+ OwnPtr<Disassembler> m_disassembler;
RefPtr<JITCode> m_jitCode;
@@ -300,18 +325,22 @@ private:
Vector<Label> m_blockHeads;
struct JSCallRecord {
- JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info)
+ JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo::CallType callType, GPRReg callee, CodeOrigin codeOrigin)
: m_fastCall(fastCall)
, m_slowCall(slowCall)
, m_targetToCheck(targetToCheck)
- , m_info(info)
+ , m_callType(callType)
+ , m_callee(callee)
+ , m_codeOrigin(codeOrigin)
{
}
Call m_fastCall;
Call m_slowCall;
DataLabelPtr m_targetToCheck;
- CallLinkInfo* m_info;
+ CallLinkInfo::CallType m_callType;
+ GPRReg m_callee;
+ CodeOrigin m_codeOrigin;
};
Vector<InlineCacheWrapper<JITGetByIdGenerator>, 4> m_getByIds;
@@ -323,7 +352,7 @@ private:
Call m_callArityFixup;
Label m_arityCheck;
- std::unique_ptr<SpeculativeJIT> m_speculative;
+ OwnPtr<SpeculativeJIT> m_speculative;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp b/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
index 836c0e048..b7ea594c6 100644
--- a/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,18 +29,15 @@
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
-#include "CodeBlockWithJITType.h"
#include "DFGCommon.h"
#include "DFGPlan.h"
-#include "JSCInlines.h"
-#include "ProfilerDatabase.h"
namespace JSC { namespace DFG {
-JITFinalizer::JITFinalizer(Plan& plan, PassRefPtr<JITCode> jitCode, std::unique_ptr<LinkBuffer> linkBuffer, MacroAssemblerCodePtr withArityCheck)
+JITFinalizer::JITFinalizer(Plan& plan, PassRefPtr<JITCode> jitCode, PassOwnPtr<LinkBuffer> linkBuffer, MacroAssemblerCodePtr withArityCheck)
: Finalizer(plan)
, m_jitCode(jitCode)
- , m_linkBuffer(WTF::move(linkBuffer))
+ , m_linkBuffer(linkBuffer)
, m_withArityCheck(withArityCheck)
{
}
@@ -49,18 +46,10 @@ JITFinalizer::~JITFinalizer()
{
}
-size_t JITFinalizer::codeSize()
-{
- return m_linkBuffer->size();
-}
-
bool JITFinalizer::finalize()
{
- m_jitCode->initializeCodeRef(
- FINALIZE_DFG_CODE(*m_linkBuffer, ("DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::DFGJIT)).data())),
- MacroAssemblerCodePtr());
-
- m_plan.codeBlock->setJITCode(m_jitCode);
+ m_jitCode->initializeCodeRef(m_linkBuffer->finalizeCodeWithoutDisassembly());
+ m_plan.codeBlock->setJITCode(m_jitCode, MacroAssemblerCodePtr());
finalizeCommon();
@@ -70,10 +59,8 @@ bool JITFinalizer::finalize()
bool JITFinalizer::finalizeFunction()
{
RELEASE_ASSERT(!m_withArityCheck.isEmptyValue());
- m_jitCode->initializeCodeRef(
- FINALIZE_DFG_CODE(*m_linkBuffer, ("DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::DFGJIT)).data())),
- m_withArityCheck);
- m_plan.codeBlock->setJITCode(m_jitCode);
+ m_jitCode->initializeCodeRef(m_linkBuffer->finalizeCodeWithoutDisassembly());
+ m_plan.codeBlock->setJITCode(m_jitCode, m_withArityCheck);
finalizeCommon();
@@ -88,9 +75,6 @@ void JITFinalizer::finalizeCommon()
if (m_plan.compilation)
m_plan.vm.m_perBytecodeProfiler->addCompilation(m_plan.compilation);
-
- if (!m_plan.willTryToTierUp)
- m_plan.codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGJITFinalizer.h b/Source/JavaScriptCore/dfg/DFGJITFinalizer.h
index 110442fe4..5c7c82b66 100644
--- a/Source/JavaScriptCore/dfg/DFGJITFinalizer.h
+++ b/Source/JavaScriptCore/dfg/DFGJITFinalizer.h
@@ -26,6 +26,8 @@
#ifndef DFGJITFinalizer_h
#define DFGJITFinalizer_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGFinalizer.h"
@@ -37,10 +39,9 @@ namespace JSC { namespace DFG {
class JITFinalizer : public Finalizer {
public:
- JITFinalizer(Plan&, PassRefPtr<JITCode>, std::unique_ptr<LinkBuffer>, MacroAssemblerCodePtr withArityCheck = MacroAssemblerCodePtr(MacroAssemblerCodePtr::EmptyValue));
+ JITFinalizer(Plan&, PassRefPtr<JITCode>, PassOwnPtr<LinkBuffer>, MacroAssemblerCodePtr withArityCheck = MacroAssemblerCodePtr(MacroAssemblerCodePtr::EmptyValue));
virtual ~JITFinalizer();
- virtual size_t codeSize() override;
virtual bool finalize() override;
virtual bool finalizeFunction() override;
@@ -48,7 +49,7 @@ private:
void finalizeCommon();
RefPtr<JITCode> m_jitCode;
- std::unique_ptr<LinkBuffer> m_linkBuffer;
+ OwnPtr<LinkBuffer> m_linkBuffer;
MacroAssemblerCodePtr m_withArityCheck;
};
diff --git a/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp b/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp
index fa002f493..033985e88 100644
--- a/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp
@@ -29,7 +29,6 @@
#if ENABLE(DFG_JIT)
#include "MacroAssembler.h"
-#include "JSCInlines.h"
#include "Options.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp b/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp
index 62cde8adb..64651309e 100644
--- a/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -38,7 +38,7 @@
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGSafeToExecute.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -62,14 +62,13 @@ class LICMPhase : public Phase {
public:
LICMPhase(Graph& graph)
: Phase(graph, "LICM")
- , m_state(graph)
, m_interpreter(graph, m_state)
{
}
bool run()
{
- DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
+ ASSERT(m_graph.m_form == SSA);
m_graph.m_dominators.computeIfNecessary(m_graph);
m_graph.m_naturalLoops.computeIfNecessary(m_graph);
@@ -82,28 +81,12 @@ public:
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
-
- // Skip blocks that are proved to not execute.
- // FIXME: This shouldn't be needed.
- // https://bugs.webkit.org/show_bug.cgi?id=128584
- if (!block->cfaHasVisited)
- continue;
-
const NaturalLoop* loop = m_graph.m_naturalLoops.innerMostLoopOf(block);
if (!loop)
continue;
LoopData& data = m_data[loop->index()];
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
-
- // Don't look beyond parts of the code that definitely always exit.
- // FIXME: This shouldn't be needed.
- // https://bugs.webkit.org/show_bug.cgi?id=128584
- if (node->op() == ForceOSRExit)
- break;
-
- addWrites(m_graph, node, data.writes);
- }
+ for (unsigned nodeIndex = block->size(); nodeIndex--;)
+ addWrites(m_graph, block->at(nodeIndex), data.writes);
}
// For each loop:
@@ -124,17 +107,11 @@ public:
BasicBlock* predecessor = header->predecessors[i];
if (m_graph.m_dominators.dominates(header, predecessor))
continue;
- DFG_ASSERT(m_graph, nullptr, !preHeader || preHeader == predecessor);
+ RELEASE_ASSERT(!preHeader || preHeader == predecessor);
preHeader = predecessor;
}
- DFG_ASSERT(m_graph, preHeader->terminal(), preHeader->terminal()->op() == Jump);
-
- // We should validate the pre-header. If we placed forExit origins on nodes only if
- // at the top of that node it is legal to exit, then we would simply check if Jump
- // had a forExit. We should disable hoisting to pre-headers that don't validate.
- // Or, we could only allow hoisting of things that definitely don't exit.
- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=145204
+ RELEASE_ASSERT(preHeader->last()->op() == Jump);
data.preHeader = preHeader;
}
@@ -156,9 +133,16 @@ public:
//
// For maximum profit, we walk blocks in DFS order to ensure that we generally
// tend to hoist dominators before dominatees.
+ Vector<BasicBlock*> depthFirst;
+ m_graph.getBlocksInDepthFirstOrder(depthFirst);
Vector<const NaturalLoop*> loopStack;
bool changed = false;
- for (BasicBlock* block : m_graph.blocksInPreOrder()) {
+ for (
+ unsigned depthFirstIndex = 0;
+ depthFirstIndex < depthFirst.size();
+ ++depthFirstIndex) {
+
+ BasicBlock* block = depthFirst[depthFirstIndex];
const NaturalLoop* loop = m_graph.m_naturalLoops.innerMostLoopOf(block);
if (!loop)
continue;
@@ -219,47 +203,6 @@ private:
return false;
}
- // FIXME: At this point if the hoisting of the full node fails but the node has type checks,
- // we could still hoist just the checks.
- // https://bugs.webkit.org/show_bug.cgi?id=144525
-
- // FIXME: If a node has a type check - even something like a CheckStructure - then we should
- // only hoist the node if we know that it will execute on every loop iteration or if we know
- // that the type check will always succeed at the loop pre-header through some other means
- // (like looking at prediction propagation results). Otherwise, we might make a mistake like
- // this:
- //
- // var o = ...; // sometimes null and sometimes an object with structure S1.
- // for (...) {
- // if (o)
- // ... = o.f; // CheckStructure and GetByOffset, which we will currently hoist.
- // }
- //
- // When we encounter such code, we'll hoist the CheckStructure and GetByOffset and then we
- // will have a recompile. We'll then end up thinking that the get_by_id needs to be
- // polymorphic, which is false.
- //
- // We can counter this by either having a control flow equivalence check, or by consulting
- // prediction propagation to see if the check would always succeed. Prediction propagation
- // would not be enough for things like:
- //
- // var p = ...; // some boolean predicate
- // var o = {};
- // if (p)
- // o.f = 42;
- // for (...) {
- // if (p)
- // ... = o.f;
- // }
- //
- // Prediction propagation can't tell us anything about the structure, and the CheckStructure
- // will appear to be hoistable because the loop doesn't clobber structures. The cell check
- // in the CheckStructure will be hoistable though, since prediction propagation can tell us
- // that o is always SpecFinalObject. In cases like this, control flow equivalence is the
- // only effective guard.
- //
- // https://bugs.webkit.org/show_bug.cgi?id=144527
-
if (readsOverlap(m_graph, node, data.writes)) {
if (verbose) {
dataLog(
@@ -284,12 +227,9 @@ private:
"\n");
}
- data.preHeader->insertBeforeTerminal(node);
- node->owner = data.preHeader;
- NodeOrigin originalOrigin = node->origin;
- node->origin.forExit = data.preHeader->terminal()->origin.forExit;
- if (!node->origin.semantic.isSet())
- node->origin.semantic = node->origin.forExit;
+ data.preHeader->insertBeforeLast(node);
+ node->misc.owner = data.preHeader;
+ node->codeOriginForExitTarget = data.preHeader->last()->codeOriginForExitTarget;
// Modify the states at the end of the preHeader of the loop we hoisted to,
// and all pre-headers inside the loop.
@@ -310,9 +250,9 @@ private:
// It just so happens that all of the nodes we currently know how to hoist
// don't have var-arg children. That may change and then we can fix this
// code. But for now we just assert that's the case.
- DFG_ASSERT(m_graph, node, !(node->flags() & NodeHasVarArgs));
+ RELEASE_ASSERT(!(node->flags() & NodeHasVarArgs));
- nodeRef = m_graph.addNode(SpecNone, Check, originalOrigin, node->children);
+ nodeRef = m_graph.addNode(SpecNone, Phantom, node->codeOrigin, node->children);
return true;
}
diff --git a/Source/JavaScriptCore/dfg/DFGLICMPhase.h b/Source/JavaScriptCore/dfg/DFGLICMPhase.h
index 9c717f23a..601918bfd 100644
--- a/Source/JavaScriptCore/dfg/DFGLICMPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGLICMPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGLICMPhase_h
#define DFGLICMPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp b/Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp
index 6011490c9..251c9a7c0 100644
--- a/Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp
+++ b/Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,7 +28,7 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -36,14 +36,14 @@ JSValue LazyJSValue::getValue(VM& vm) const
{
switch (m_kind) {
case KnownValue:
- return value()->value();
+ return value();
case SingleCharacterString:
return jsSingleCharacterString(&vm, u.character);
case KnownStringImpl:
return jsString(&vm, u.stringImpl);
}
RELEASE_ASSERT_NOT_REACHED();
- return JSValue();
+ return value();
}
static TriState equalToSingleCharacter(JSValue value, UChar character)
@@ -81,11 +81,11 @@ TriState LazyJSValue::strictEqual(const LazyJSValue& other) const
case KnownValue:
switch (other.m_kind) {
case KnownValue:
- return JSValue::pureStrictEqual(value()->value(), other.value()->value());
+ return JSValue::pureStrictEqual(value(), other.value());
case SingleCharacterString:
- return equalToSingleCharacter(value()->value(), other.character());
+ return equalToSingleCharacter(value(), other.character());
case KnownStringImpl:
- return equalToStringImpl(value()->value(), other.stringImpl());
+ return equalToStringImpl(value(), other.stringImpl());
}
break;
case SingleCharacterString:
@@ -113,41 +113,11 @@ TriState LazyJSValue::strictEqual(const LazyJSValue& other) const
return FalseTriState;
}
-uintptr_t LazyJSValue::switchLookupValue(SwitchKind kind) const
-{
- // NB. Not every kind of JSValue will be able to give you a switch lookup
- // value, and this method will assert, or do bad things, if you use it
- // for a kind of value that can't.
- switch (m_kind) {
- case KnownValue:
- switch (kind) {
- case SwitchImm:
- return value()->value().asInt32();
- case SwitchCell:
- return bitwise_cast<uintptr_t>(value()->value().asCell());
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
- case SingleCharacterString:
- switch (kind) {
- case SwitchChar:
- return character();
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
-}
-
void LazyJSValue::dumpInContext(PrintStream& out, DumpContext* context) const
{
switch (m_kind) {
case KnownValue:
- value()->dumpInContext(out, context);
+ value().dumpInContext(out, context);
return;
case SingleCharacterString:
out.print("Lazy:SingleCharacterString(");
diff --git a/Source/JavaScriptCore/dfg/DFGLazyJSValue.h b/Source/JavaScriptCore/dfg/DFGLazyJSValue.h
index a1231db04..37a07266d 100644
--- a/Source/JavaScriptCore/dfg/DFGLazyJSValue.h
+++ b/Source/JavaScriptCore/dfg/DFGLazyJSValue.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,10 +26,11 @@
#ifndef DFGLazyJSValue_h
#define DFGLazyJSValue_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include "DFGCommon.h"
-#include "DFGFrozenValue.h"
+#include "JSCJSValue.h"
#include <wtf/text/StringImpl.h>
namespace JSC { namespace DFG {
@@ -45,10 +46,10 @@ enum LazinessKind {
class LazyJSValue {
public:
- LazyJSValue(FrozenValue* value = FrozenValue::emptySingleton())
+ LazyJSValue(JSValue value = JSValue())
: m_kind(KnownValue)
{
- u.value = value;
+ u.value = JSValue::encode(value);
}
static LazyJSValue singleCharacterString(UChar character)
@@ -67,19 +68,19 @@ public:
return result;
}
- FrozenValue* tryGetValue(Graph&) const
+ JSValue tryGetValue() const
{
if (m_kind == KnownValue)
return value();
- return nullptr;
+ return JSValue();
}
JSValue getValue(VM&) const;
- FrozenValue* value() const
+ JSValue value() const
{
ASSERT(m_kind == KnownValue);
- return u.value;
+ return JSValue::decode(u.value);
}
UChar character() const
@@ -96,14 +97,28 @@ public:
TriState strictEqual(const LazyJSValue& other) const;
- uintptr_t switchLookupValue(SwitchKind) const;
+ unsigned switchLookupValue() const
+ {
+ // NB. Not every kind of JSValue will be able to give you a switch lookup
+ // value, and this method will assert, or do bad things, if you use it
+ // for a kind of value that can't.
+ switch (m_kind) {
+ case KnownValue:
+ return value().asInt32();
+ case SingleCharacterString:
+ return character();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+ }
void dump(PrintStream&) const;
void dumpInContext(PrintStream&, DumpContext*) const;
private:
union {
- FrozenValue* value;
+ EncodedJSValue value;
UChar character;
StringImpl* stringImpl;
} u;
diff --git a/Source/JavaScriptCore/dfg/DFGLazyNode.cpp b/Source/JavaScriptCore/dfg/DFGLazyNode.cpp
deleted file mode 100644
index c8d0940e7..000000000
--- a/Source/JavaScriptCore/dfg/DFGLazyNode.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGLazyNode.h"
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-void LazyNode::dump(PrintStream& out) const
-{
- if (!*this)
- out.print("LazyNode:0");
- else {
- if (isNode())
- out.print("LazyNode:@", asNode()->index());
- else
- out.print("LazyNode:FrozenValue(", Graph::opName(op()), ", ", pointerDump(asValue()), ")");
- }
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGLazyNode.h b/Source/JavaScriptCore/dfg/DFGLazyNode.h
deleted file mode 100644
index ffd572f85..000000000
--- a/Source/JavaScriptCore/dfg/DFGLazyNode.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGLazyNode_h
-#define DFGLazyNode_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGCommon.h"
-#include "DFGInsertionSet.h"
-#include <wtf/PrintStream.h>
-
-namespace JSC { namespace DFG {
-
-class LazyNode {
-public:
- static const size_t jsConstantTag = 0;
- static const size_t doubleConstantTag = 1;
- static const size_t int52ConstantTag = 2;
-
- static const uintptr_t tagMask = 0x3;
- static const uintptr_t pointerMask = ~tagMask;
-
- explicit LazyNode(Node* node = nullptr)
- : m_node(node)
- , m_value(reinterpret_cast<uintptr_t>(nullptr))
- {
- if (node && node->isConstant())
- setFrozenValue(node->constant(), node->op());
- }
-
- explicit LazyNode(FrozenValue* value, NodeType op = JSConstant)
- : m_node(nullptr)
- , m_value(reinterpret_cast<uintptr_t>(nullptr))
- {
- setFrozenValue(value, op);
- }
-
- LazyNode(std::nullptr_t)
- : m_node(nullptr)
- , m_value(reinterpret_cast<uintptr_t>(nullptr))
- {
- }
-
- LazyNode(WTF::HashTableDeletedValueType)
- : m_node(reinterpret_cast<Node*>(-1))
- {
- }
-
- void setNode(Node* node)
- {
- m_node = node;
- if (node && node->isConstant())
- setFrozenValue(node->constant(), node->op());
- }
-
- bool isHashTableDeletedValue() const { return m_node == reinterpret_cast<Node*>(-1); }
-
- bool isNode() const { return m_node; }
-
- NodeType op() const
- {
- if (m_node)
- return m_node->op();
-
- switch (m_value & tagMask) {
- case jsConstantTag:
- return JSConstant;
- case doubleConstantTag:
- return DoubleConstant;
- case int52ConstantTag:
- return Int52Constant;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
-
- Node* asNode() const
- {
- ASSERT(m_node || !asValue());
- return m_node;
- }
-
- FrozenValue* asValue() const
- {
- return reinterpret_cast<FrozenValue*>(m_value & pointerMask);
- }
-
- unsigned hash() const
- {
- if (asValue())
- return WTF::PtrHash<FrozenValue*>::hash(asValue());
- return WTF::PtrHash<Node*>::hash(m_node);
- }
-
- bool operator==(const LazyNode& other) const
- {
- if (asValue() || other.asValue())
- return m_value == other.m_value;
- return m_node == other.m_node;
- }
-
- bool operator!=(const LazyNode& other) const
- {
- return !(*this == other);
- }
-
- Node* ensureIsNode(InsertionSet& insertionSet, BasicBlock* block, unsigned nodeIndex)
- {
- if (!m_node)
- m_node = insertionSet.insertConstant(nodeIndex, block->at(nodeIndex)->origin, asValue(), op());
-
- return asNode();
- }
-
- Node* operator->() const { return asNode(); }
-
- Node& operator*() const { return *asNode(); }
-
- bool operator!() const { return !asValue() && !asNode(); }
-
- explicit operator bool() const { return !!*this; }
-
- void dump(PrintStream& out) const;
-
-private:
- void setFrozenValue(FrozenValue* value, NodeType op)
- {
- ASSERT(value);
- m_value = reinterpret_cast<uintptr_t>(value);
- ASSERT(m_value == (m_value & pointerMask));
- switch (op) {
- case JSConstant:
- m_value |= jsConstantTag;
- break;
- case DoubleConstant:
- m_value |= doubleConstantTag;
- break;
- case Int52Constant:
- m_value |= int52ConstantTag;
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- Node* m_node;
- uintptr_t m_value;
-};
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-template<typename T> struct HashTraits;
-template<> struct HashTraits<JSC::DFG::LazyNode> : SimpleClassHashTraits<JSC::DFG::LazyNode> {
- static const bool emptyValueIsZero = true;
-};
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGLazyNode_h
diff --git a/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp b/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp
index 5e7d5bb71..65c4105bc 100644
--- a/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +32,7 @@
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -57,7 +57,6 @@ public:
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
- block->ssa->liveAtTailIsDirty = true;
block->ssa->liveAtHead.clear();
block->ssa->liveAtTail.clear();
}
@@ -69,11 +68,12 @@ public:
} while (m_changed);
if (!m_graph.block(0)->ssa->liveAtHead.isEmpty()) {
- DFG_CRASH(
- m_graph, nullptr,
- toCString(
- "Bad liveness analysis result: live at root is not empty: ",
- nodeListDump(m_graph.block(0)->ssa->liveAtHead)).data());
+ dataLog(
+ "Bad liveness analysis result: live at root is not empty: ",
+ nodeListDump(m_graph.block(0)->ssa->liveAtHead), "\n");
+ dataLog("IR at time of error:\n");
+ m_graph.dump();
+ CRASH();
}
return true;
@@ -85,12 +85,11 @@ private:
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
return;
-
- if (!block->ssa->liveAtTailIsDirty)
- return;
- block->ssa->liveAtTailIsDirty = false;
-
+
+ // FIXME: It's likely that this can be improved, for static analyses that use
+ // HashSets. https://bugs.webkit.org/show_bug.cgi?id=118455
m_live = block->ssa->liveAtTail;
+
for (unsigned nodeIndex = block->size(); nodeIndex--;) {
Node* node = block->at(nodeIndex);
@@ -132,17 +131,13 @@ private:
}
}
- for (Node* node : m_live) {
- if (!block->ssa->liveAtHead.contains(node)) {
- m_changed = true;
- for (unsigned i = block->predecessors.size(); i--;) {
- BasicBlock* predecessor = block->predecessors[i];
- if (predecessor->ssa->liveAtTail.add(node).isNewEntry)
- predecessor->ssa->liveAtTailIsDirty = true;
- }
- }
- }
- block->ssa->liveAtHead = WTF::move(m_live);
+ if (m_live == block->ssa->liveAtHead)
+ return;
+
+ m_changed = true;
+ block->ssa->liveAtHead = m_live;
+ for (unsigned i = block->predecessors.size(); i--;)
+ block->predecessors[i]->ssa->liveAtTail.add(m_live.begin(), m_live.end());
}
void addChildUse(Node*, Edge& edge)
diff --git a/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h b/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h
index 28b72853c..806611235 100644
--- a/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGLivenessAnalysisPhase_h
#define DFGLivenessAnalysisPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
diff --git a/Source/JavaScriptCore/dfg/DFGLongLivedState.cpp b/Source/JavaScriptCore/dfg/DFGLongLivedState.cpp
index 7a2f2f1f0..26dc16c7c 100644
--- a/Source/JavaScriptCore/dfg/DFGLongLivedState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGLongLivedState.cpp
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace JSC { namespace DFG {
LongLivedState::LongLivedState()
diff --git a/Source/JavaScriptCore/dfg/DFGLongLivedState.h b/Source/JavaScriptCore/dfg/DFGLongLivedState.h
index 77fab0407..9eb676885 100644
--- a/Source/JavaScriptCore/dfg/DFGLongLivedState.h
+++ b/Source/JavaScriptCore/dfg/DFGLongLivedState.h
@@ -26,6 +26,8 @@
#ifndef DFGLongLivedState_h
#define DFGLongLivedState_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGNodeAllocator.h"
diff --git a/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp b/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp
index 340ef2d18..507e00ff5 100644
--- a/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp
@@ -32,17 +32,16 @@
#include "DFGBlockInsertionSet.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include <wtf/HashMap.h>
namespace JSC { namespace DFG {
BasicBlock* createPreHeader(Graph& graph, BlockInsertionSet& insertionSet, BasicBlock* block)
{
- // Don't bother to preserve execution frequencies for now.
- BasicBlock* preHeader = insertionSet.insertBefore(block, PNaN);
+ BasicBlock* preHeader = insertionSet.insertBefore(block);
preHeader->appendNode(
- graph, SpecNone, Jump, block->firstOrigin(), OpInfo(block));
+ graph, SpecNone, Jump, block->at(0)->codeOrigin, OpInfo(block));
for (unsigned predecessorIndex = 0; predecessorIndex < block->predecessors.size(); predecessorIndex++) {
BasicBlock* predecessor = block->predecessors[predecessorIndex];
@@ -88,23 +87,11 @@ public:
existingPreHeader = predecessor;
continue;
}
- // We won't have duplicate entries in the predecessors list.
- DFG_ASSERT(m_graph, nullptr, existingPreHeader != predecessor);
+ if (existingPreHeader == predecessor)
+ continue;
needsNewPreHeader = true;
break;
}
-
- // This phase should only be run on a DFG where unreachable blocks have been pruned.
- // We also don't allow loops back to root. This means that every loop header has got
- // to have a pre-header.
- DFG_ASSERT(m_graph, nullptr, existingPreHeader);
-
- // We are looking at the predecessors of a loop header. A loop header has to have
- // some predecessor other than the pre-header. We must have broken critical edges
- // because that is the DFG SSA convention. Therefore, each predecessor of the loop
- // header must have only one successor.
- DFG_ASSERT(m_graph, nullptr, existingPreHeader->terminal()->op() == Jump);
-
if (!needsNewPreHeader)
continue;
diff --git a/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h b/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h
index bdd62167d..a229875c8 100644
--- a/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGLoopPreHeaderCreationPhase_h
#define DFGLoopPreHeaderCreationPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGMayExit.cpp b/Source/JavaScriptCore/dfg/DFGMayExit.cpp
deleted file mode 100644
index 4631ecd56..000000000
--- a/Source/JavaScriptCore/dfg/DFGMayExit.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGMayExit.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "DFGNode.h"
-#include "Operations.h"
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-class EdgeMayExit {
-public:
- EdgeMayExit()
- : m_result(false)
- {
- }
-
- void operator()(Node*, Edge edge)
- {
- if (edge.willHaveCheck()) {
- m_result = true;
- return;
- }
-
- switch (edge.useKind()) {
- // These are shady because nodes that have these use kinds will typically exit for
- // unrelated reasons. For example CompareEq doesn't usually exit, but if it uses ObjectUse
- // then it will.
- case ObjectUse:
- case ObjectOrOtherUse:
- m_result = true;
- break;
-
- // These are shady because they check the structure even if the type of the child node
- // passes the StringObject type filter.
- case StringObjectUse:
- case StringOrStringObjectUse:
- m_result = true;
- break;
-
- default:
- break;
- }
- }
-
- bool result() const { return m_result; }
-
-private:
- bool m_result;
-};
-
-} // anonymous namespace
-
-bool mayExit(Graph& graph, Node* node)
-{
- switch (node->op()) {
- // This is a carefully curated list of nodes that definitely do not exit. We try to be very
- // conservative when maintaining this list, because adding new node types to it doesn't
- // generally make things a lot better but it might introduce insanely subtle bugs.
- case SetArgument:
- case JSConstant:
- case DoubleConstant:
- case Int52Constant:
- case MovHint:
- case SetLocal:
- case Flush:
- case Phantom:
- case Check:
- case GetLocal:
- case LoopHint:
- case Phi:
- case Upsilon:
- case ZombieHint:
- case BottomValue:
- case PutHint:
- case PhantomNewObject:
- case PutStack:
- case KillStack:
- case GetStack:
- case GetCallee:
- case GetArgumentCount:
- case GetScope:
- case PhantomLocal:
- case CountExecution:
- case Jump:
- case Branch:
- case Unreachable:
- case DoubleRep:
- case Int52Rep:
- case ValueRep:
- break;
-
- default:
- // If in doubt, return true.
- return true;
- }
-
- EdgeMayExit functor;
- DFG_NODE_DO_TO_CHILDREN(graph, node, functor);
- return functor.result();
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGMayExit.h b/Source/JavaScriptCore/dfg/DFGMayExit.h
deleted file mode 100644
index e5ae04072..000000000
--- a/Source/JavaScriptCore/dfg/DFGMayExit.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGMayExit_h
-#define DFGMayExit_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-struct Node;
-
-// A *very* conservative approximation of whether or not a node could possibly exit. Usually
-// returns true except in cases where we obviously don't expect an exit.
-
-bool mayExit(Graph&, Node*);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGMayExit_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp
deleted file mode 100644
index 61f331f10..000000000
--- a/Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGMinifiedGraph.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "JSCInlines.h"
-#include "TrackedReferences.h"
-
-namespace JSC { namespace DFG {
-
-void MinifiedGraph::prepareAndShrink()
-{
- std::sort(m_list.begin(), m_list.end(), MinifiedNode::compareByNodeIndex);
- m_list.shrinkToFit();
-}
-
-void MinifiedGraph::validateReferences(const TrackedReferences& trackedReferences)
-{
- for (MinifiedNode& node : m_list) {
- if (node.hasConstant())
- trackedReferences.check(node.constant());
- }
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h
index 4fc44f8cc..892a20648 100644
--- a/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGMinifiedGraph_h
#define DFGMinifiedGraph_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGMinifiedNode.h"
@@ -33,11 +35,7 @@
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>
-namespace JSC {
-
-class TrackedReferences;
-
-namespace DFG {
+namespace JSC { namespace DFG {
class MinifiedGraph {
public:
@@ -54,9 +52,11 @@ public:
m_list.append(node);
}
- void prepareAndShrink();
-
- void validateReferences(const TrackedReferences&);
+ void prepareAndShrink()
+ {
+ std::sort(m_list.begin(), m_list.end(), MinifiedNode::compareByNodeIndex);
+ m_list.shrinkToFit();
+ }
private:
Vector<MinifiedNode> m_list;
diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedID.h b/Source/JavaScriptCore/dfg/DFGMinifiedID.h
index bdb312d81..24ea25645 100644
--- a/Source/JavaScriptCore/dfg/DFGMinifiedID.h
+++ b/Source/JavaScriptCore/dfg/DFGMinifiedID.h
@@ -26,6 +26,10 @@
#ifndef DFGMinifiedID_h
#define DFGMinifiedID_h
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
#include "DFGCommon.h"
#include <wtf/HashMap.h>
#include <wtf/PrintStream.h>
@@ -35,7 +39,6 @@ namespace JSC { namespace DFG {
class Graph;
class MinifiedNode;
class ValueSource;
-struct Node;
class MinifiedID {
public:
@@ -97,11 +100,11 @@ template<> struct DefaultHash<JSC::DFG::MinifiedID> {
};
template<typename T> struct HashTraits;
-template<> struct HashTraits<JSC::DFG::MinifiedID> : SimpleClassHashTraits<JSC::DFG::MinifiedID> {
- static const bool emptyValueIsZero = false;
-};
+template<> struct HashTraits<JSC::DFG::MinifiedID> : SimpleClassHashTraits<JSC::DFG::MinifiedID> { };
} // namespace WTF
+#endif // ENABLE(DFG_JIT)
+
#endif // DFGMinifiedID_h
diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp b/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp
index 80795c2fe..802cb2984 100644
--- a/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,7 +29,6 @@
#if ENABLE(DFG_JIT)
#include "DFGNode.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
@@ -39,11 +38,13 @@ MinifiedNode MinifiedNode::fromNode(Node* node)
MinifiedNode result;
result.m_id = MinifiedID(node);
result.m_op = node->op();
- if (hasConstant(node->op()))
- result.m_info = JSValue::encode(node->asJSValue());
+ if (hasConstantNumber(node->op()))
+ result.m_info = node->constantNumber();
+ else if (hasWeakConstant(node->op()))
+ result.m_info = bitwise_cast<uintptr_t>(node->weakConstant());
else {
- ASSERT(node->op() == PhantomDirectArguments || node->op() == PhantomClonedArguments);
- result.m_info = bitwise_cast<uintptr_t>(node->origin.semantic.inlineCallFrame);
+ ASSERT(node->op() == PhantomArguments);
+ result.m_info = 0;
}
return result;
}
diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedNode.h b/Source/JavaScriptCore/dfg/DFGMinifiedNode.h
index 29798bc22..afea6aeed 100644
--- a/Source/JavaScriptCore/dfg/DFGMinifiedNode.h
+++ b/Source/JavaScriptCore/dfg/DFGMinifiedNode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGMinifiedNode_h
#define DFGMinifiedNode_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -40,10 +42,8 @@ inline bool belongsInMinifiedGraph(NodeType type)
{
switch (type) {
case JSConstant:
- case Int52Constant:
- case DoubleConstant:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
+ case WeakJSConstant:
+ case PhantomArguments:
return true;
default:
return false;
@@ -59,18 +59,22 @@ public:
MinifiedID id() const { return m_id; }
NodeType op() const { return m_op; }
- bool hasConstant() const { return hasConstant(m_op); }
+ bool hasConstant() const { return hasConstantNumber() || hasWeakConstant(); }
+
+ bool hasConstantNumber() const { return hasConstantNumber(m_op); }
- JSValue constant() const
+ unsigned constantNumber() const
{
- return JSValue::decode(bitwise_cast<EncodedJSValue>(m_info));
+ ASSERT(hasConstantNumber(m_op));
+ return m_info;
}
- bool hasInlineCallFrame() const { return hasInlineCallFrame(m_op); }
+ bool hasWeakConstant() const { return hasWeakConstant(m_op); }
- InlineCallFrame* inlineCallFrame() const
+ JSCell* weakConstant() const
{
- return bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(m_info));
+ ASSERT(hasWeakConstant(m_op));
+ return bitwise_cast<JSCell*>(m_info);
}
static MinifiedID getID(MinifiedNode* node) { return node->id(); }
@@ -80,18 +84,17 @@ public:
}
private:
- static bool hasConstant(NodeType type)
+ static bool hasConstantNumber(NodeType type)
{
- return type == JSConstant || type == Int52Constant || type == DoubleConstant;
+ return type == JSConstant;
}
-
- static bool hasInlineCallFrame(NodeType type)
+ static bool hasWeakConstant(NodeType type)
{
- return type == PhantomDirectArguments || type == PhantomClonedArguments;
+ return type == WeakJSConstant;
}
MinifiedID m_id;
- uint64_t m_info;
+ uintptr_t m_info;
NodeType m_op;
};
diff --git a/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp b/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp
deleted file mode 100644
index e2cf37e90..000000000
--- a/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGMovHintRemovalPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "BytecodeLivenessAnalysisInlines.h"
-#include "DFGEpoch.h"
-#include "DFGForAllKills.h"
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGMayExit.h"
-#include "DFGPhase.h"
-#include "JSCInlines.h"
-#include "OperandsInlines.h"
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-bool verbose = false;
-
-class MovHintRemovalPhase : public Phase {
-public:
- MovHintRemovalPhase(Graph& graph)
- : Phase(graph, "MovHint removal")
- , m_state(OperandsLike, graph.block(0)->variablesAtHead)
- , m_changed(false)
- {
- }
-
- bool run()
- {
- if (verbose) {
- dataLog("Graph before MovHint removal:\n");
- m_graph.dump();
- }
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder())
- handleBlock(block);
-
- return m_changed;
- }
-
-private:
- void handleBlock(BasicBlock* block)
- {
- if (verbose)
- dataLog("Handing block ", pointerDump(block), "\n");
-
- // A MovHint is unnecessary if the local dies before it is used. We answer this question by
- // maintaining the current exit epoch, and associating an epoch with each local. When a
- // local dies, it gets the current exit epoch. If a MovHint occurs in the same epoch as its
- // local, then it means there was no exit between the local's death and the MovHint - i.e.
- // the MovHint is unnecessary.
-
- Epoch currentEpoch = Epoch::first();
-
- m_state.fill(Epoch());
- m_graph.forAllLiveInBytecode(
- block->terminal()->origin.forExit,
- [&] (VirtualRegister reg) {
- m_state.operand(reg) = currentEpoch;
- });
-
- if (verbose)
- dataLog(" Locals: ", m_state, "\n");
-
- // Assume that blocks after us exit.
- currentEpoch.bump();
-
- for (unsigned nodeIndex = block->size(); nodeIndex--;) {
- Node* node = block->at(nodeIndex);
-
- if (node->op() == MovHint) {
- Epoch localEpoch = m_state.operand(node->unlinkedLocal());
- if (verbose)
- dataLog(" At ", node, ": current = ", currentEpoch, ", local = ", localEpoch, "\n");
- if (!localEpoch || localEpoch == currentEpoch) {
- node->setOpAndDefaultFlags(ZombieHint);
- node->child1() = Edge();
- m_changed = true;
- }
- m_state.operand(node->unlinkedLocal()) = Epoch();
- }
-
- if (mayExit(m_graph, node))
- currentEpoch.bump();
-
- if (nodeIndex) {
- forAllKilledOperands(
- m_graph, block->at(nodeIndex - 1), node,
- [&] (VirtualRegister reg) {
- // This function is a bit sloppy - it might claim to kill a local even if
- // it's still live after. We need to protect against that.
- if (!!m_state.operand(reg))
- return;
-
- if (verbose)
- dataLog(" Killed operand at ", node, ": ", reg, "\n");
- m_state.operand(reg) = currentEpoch;
- });
- }
- }
- }
-
- Operands<Epoch> m_state;
- bool m_changed;
-};
-
-} // anonymous namespace
-
-bool performMovHintRemoval(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG MovHint Removal Phase");
- return runPhase<MovHintRemovalPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h b/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h
deleted file mode 100644
index dd4c20626..000000000
--- a/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGMovHintRemovalPhase_h
-#define DFGMovHintRemovalPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Cleans up unnecessary MovHints. A MovHint is necessary if the variable dies before there is an
-// exit.
-
-bool performMovHintRemoval(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGMovHintRemovalPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp b/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp
deleted file mode 100644
index d1a111be7..000000000
--- a/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGMultiGetByOffsetData.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGFrozenValue.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-void GetByOffsetMethod::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- out.print(m_kind, ":");
- switch (m_kind) {
- case Invalid:
- out.print("<none>");
- return;
- case Constant:
- out.print(pointerDumpInContext(constant(), context));
- return;
- case Load:
- out.print(offset());
- return;
- case LoadFromPrototype:
- out.print(offset(), "@", pointerDumpInContext(prototype(), context));
- return;
- }
-}
-
-void GetByOffsetMethod::dump(PrintStream& out) const
-{
- dumpInContext(out, nullptr);
-}
-
-void MultiGetByOffsetCase::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- out.print(inContext(m_set, context), ":", inContext(m_method, context));
-}
-
-void MultiGetByOffsetCase::dump(PrintStream& out) const
-{
- dumpInContext(out, nullptr);
-}
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-using namespace JSC::DFG;
-
-void printInternal(PrintStream& out, GetByOffsetMethod::Kind kind)
-{
- switch (kind) {
- case GetByOffsetMethod::Invalid:
- out.print("Invalid");
- return;
- case GetByOffsetMethod::Constant:
- out.print("Constant");
- return;
- case GetByOffsetMethod::Load:
- out.print("Load");
- return;
- case GetByOffsetMethod::LoadFromPrototype:
- out.print("LoadFromPrototype");
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.h b/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.h
deleted file mode 100644
index 72680bf61..000000000
--- a/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGMultiGetByOffsetData_h
-#define DFGMultiGetByOffsetData_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DumpContext.h"
-#include "JSObject.h"
-#include "StructureSet.h"
-
-namespace JSC { namespace DFG {
-
-class FrozenValue;
-
-class GetByOffsetMethod {
-public:
- enum Kind {
- Invalid,
- Constant,
- Load,
- LoadFromPrototype
- };
-
- GetByOffsetMethod()
- : m_kind(Invalid)
- {
- }
-
- static GetByOffsetMethod constant(FrozenValue* value)
- {
- GetByOffsetMethod result;
- result.m_kind = Constant;
- result.u.constant = value;
- return result;
- }
-
- static GetByOffsetMethod load(PropertyOffset offset)
- {
- GetByOffsetMethod result;
- result.m_kind = Load;
- result.u.load.offset = offset;
- return result;
- }
-
- static GetByOffsetMethod loadFromPrototype(FrozenValue* prototype, PropertyOffset offset)
- {
- GetByOffsetMethod result;
- result.m_kind = LoadFromPrototype;
- result.u.load.prototype = prototype;
- result.u.load.offset = offset;
- return result;
- }
-
- bool operator!() const { return m_kind == Invalid; }
-
- Kind kind() const { return m_kind; }
-
- FrozenValue* constant() const
- {
- ASSERT(kind() == Constant);
- return u.constant;
- }
-
- FrozenValue* prototype() const
- {
- ASSERT(kind() == LoadFromPrototype);
- return u.load.prototype;
- }
-
- PropertyOffset offset() const
- {
- ASSERT(kind() == Load || kind() == LoadFromPrototype);
- return u.load.offset;
- }
-
- void dumpInContext(PrintStream&, DumpContext*) const;
- void dump(PrintStream&) const;
-
-private:
- union {
- FrozenValue* constant;
- struct {
- FrozenValue* prototype;
- PropertyOffset offset;
- } load;
- } u;
- Kind m_kind;
-};
-
-class MultiGetByOffsetCase {
-public:
- MultiGetByOffsetCase()
- {
- }
-
- MultiGetByOffsetCase(const StructureSet& set, const GetByOffsetMethod& method)
- : m_set(set)
- , m_method(method)
- {
- }
-
- StructureSet& set() { return m_set; }
- const StructureSet& set() const { return m_set; }
- const GetByOffsetMethod& method() const { return m_method; }
-
- void dumpInContext(PrintStream&, DumpContext*) const;
- void dump(PrintStream&) const;
-
-private:
- StructureSet m_set;
- GetByOffsetMethod m_method;
-};
-
-struct MultiGetByOffsetData {
- unsigned identifierNumber;
- Vector<MultiGetByOffsetCase, 2> cases;
-};
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-void printInternal(PrintStream&, JSC::DFG::GetByOffsetMethod::Kind);
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGMultiGetByOffsetData_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGNaiveDominators.cpp b/Source/JavaScriptCore/dfg/DFGNaiveDominators.cpp
deleted file mode 100644
index eb8c63aec..000000000
--- a/Source/JavaScriptCore/dfg/DFGNaiveDominators.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGNaiveDominators.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-NaiveDominators::NaiveDominators()
-{
-}
-
-NaiveDominators::~NaiveDominators()
-{
-}
-
-void NaiveDominators::compute(Graph& graph)
-{
- // This implements a naive dominator solver.
-
- ASSERT(graph.block(0)->predecessors.isEmpty());
-
- unsigned numBlocks = graph.numBlocks();
-
- // Allocate storage for the dense dominance matrix.
- if (numBlocks > m_results.size()) {
- m_results.grow(numBlocks);
- for (unsigned i = numBlocks; i--;)
- m_results[i].resize(numBlocks);
- m_scratch.resize(numBlocks);
- }
-
- // We know that the entry block is only dominated by itself.
- m_results[0].clearAll();
- m_results[0].set(0);
-
- // Find all of the valid blocks.
- m_scratch.clearAll();
- for (unsigned i = numBlocks; i--;) {
- if (!graph.block(i))
- continue;
- m_scratch.set(i);
- }
-
- // Mark all nodes as dominated by everything.
- for (unsigned i = numBlocks; i-- > 1;) {
- if (!graph.block(i) || graph.block(i)->predecessors.isEmpty())
- m_results[i].clearAll();
- else
- m_results[i].set(m_scratch);
- }
-
- // Iteratively eliminate nodes that are not dominator.
- bool changed;
- do {
- changed = false;
- // Prune dominators in all non entry blocks: forward scan.
- for (unsigned i = 1; i < numBlocks; ++i)
- changed |= pruneDominators(graph, i);
-
- if (!changed)
- break;
-
- // Prune dominators in all non entry blocks: backward scan.
- changed = false;
- for (unsigned i = numBlocks; i-- > 1;)
- changed |= pruneDominators(graph, i);
- } while (changed);
-}
-
-bool NaiveDominators::pruneDominators(Graph& graph, BlockIndex idx)
-{
- BasicBlock* block = graph.block(idx);
-
- if (!block || block->predecessors.isEmpty())
- return false;
-
- // Find the intersection of dom(preds).
- m_scratch.set(m_results[block->predecessors[0]->index]);
- for (unsigned j = block->predecessors.size(); j-- > 1;)
- m_scratch.filter(m_results[block->predecessors[j]->index]);
-
- // The block is also dominated by itself.
- m_scratch.set(idx);
-
- return m_results[idx].setAndCheck(m_scratch);
-}
-
-void NaiveDominators::dump(Graph& graph, PrintStream& out) const
-{
- for (BlockIndex blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) {
- BasicBlock* block = graph.block(blockIndex);
- if (!block)
- continue;
- out.print(" Block ", *block, ":");
- for (BlockIndex otherIndex = 0; otherIndex < graph.numBlocks(); ++otherIndex) {
- if (!dominates(block->index, otherIndex))
- continue;
- out.print(" #", otherIndex);
- }
- out.print("\n");
- }
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGNaiveDominators.h b/Source/JavaScriptCore/dfg/DFGNaiveDominators.h
deleted file mode 100644
index d88dd3acc..000000000
--- a/Source/JavaScriptCore/dfg/DFGNaiveDominators.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGNaiveDominators_h
-#define DFGNaiveDominators_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBasicBlock.h"
-#include "DFGCommon.h"
-#include <wtf/FastBitVector.h>
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// This class is only used for validating the real dominators implementation.
-
-class NaiveDominators {
-public:
- NaiveDominators();
- ~NaiveDominators();
-
- void compute(Graph&);
-
- bool dominates(BlockIndex from, BlockIndex to) const
- {
- return m_results[to].get(from);
- }
-
- bool dominates(BasicBlock* from, BasicBlock* to) const
- {
- return dominates(from->index, to->index);
- }
-
- void dump(Graph&, PrintStream&) const;
-
-private:
- bool pruneDominators(Graph&, BlockIndex);
-
- Vector<FastBitVector> m_results; // For each block, the bitvector of blocks that dominate it.
- FastBitVector m_scratch; // A temporary bitvector with bit for each block. We recycle this to save new/deletes.
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGNaiveDominators_h
diff --git a/Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp b/Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp
index edb78cf5e..848917f70 100644
--- a/Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp
+++ b/Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp
@@ -29,7 +29,6 @@
#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
-#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
namespace JSC { namespace DFG {
@@ -45,11 +44,6 @@ void NaturalLoop::dump(PrintStream& out) const
NaturalLoops::NaturalLoops() { }
NaturalLoops::~NaturalLoops() { }
-void NaturalLoops::computeDependencies(Graph& graph)
-{
- graph.m_dominators.computeIfNecessary(graph);
-}
-
void NaturalLoops::compute(Graph& graph)
{
// Implement the classic dominator-based natural loop finder. The first
@@ -62,9 +56,11 @@ void NaturalLoops::compute(Graph& graph)
static const bool verbose = false;
+ graph.m_dominators.computeIfNecessary(graph);
+
if (verbose) {
dataLog("Dominators:\n");
- graph.m_dominators.dump(WTF::dataFile());
+ graph.m_dominators.dump(graph, WTF::dataFile());
}
m_loops.resize(0);
diff --git a/Source/JavaScriptCore/dfg/DFGNaturalLoops.h b/Source/JavaScriptCore/dfg/DFGNaturalLoops.h
index 57225e36f..7ad0b0bdc 100644
--- a/Source/JavaScriptCore/dfg/DFGNaturalLoops.h
+++ b/Source/JavaScriptCore/dfg/DFGNaturalLoops.h
@@ -26,6 +26,8 @@
#ifndef DFGNaturalLoops_h
#define DFGNaturalLoops_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAnalysis.h"
@@ -93,7 +95,6 @@ public:
NaturalLoops();
~NaturalLoops();
- void computeDependencies(Graph&);
void compute(Graph&);
unsigned numLoops() const
@@ -155,14 +156,6 @@ public:
return false;
}
- unsigned loopDepth(BasicBlock* block) const
- {
- unsigned depth = 0;
- for (const NaturalLoop* loop = innerMostLoopOf(block); loop; loop = innerMostOuterLoop(*loop))
- depth++;
- return depth;
- }
-
// Return the indices of all loops this belongs to.
Vector<const NaturalLoop*> loopsOf(BasicBlock*) const;
diff --git a/Source/JavaScriptCore/dfg/DFGNode.cpp b/Source/JavaScriptCore/dfg/DFGNode.cpp
index 6a9853424..bf43f29a4 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,40 +30,9 @@
#include "DFGGraph.h"
#include "DFGNodeAllocator.h"
-#include "DFGPromotedHeapLocation.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
-bool MultiPutByOffsetData::writesStructures() const
-{
- for (unsigned i = variants.size(); i--;) {
- if (variants[i].writesStructures())
- return true;
- }
- return false;
-}
-
-bool MultiPutByOffsetData::reallocatesStorage() const
-{
- for (unsigned i = variants.size(); i--;) {
- if (variants[i].reallocatesStorage())
- return true;
- }
- return false;
-}
-
-void BranchTarget::dump(PrintStream& out) const
-{
- if (!block)
- return;
-
- out.print(*block);
-
- if (count == count) // If the count is not NaN, then print it.
- out.print("/w:", count);
-}
-
unsigned Node::index() const
{
return NodeAllocator::allocatorOf(this)->indexOf(this);
@@ -75,6 +44,7 @@ bool Node::hasVariableAccessData(Graph& graph)
case Phi:
return graph.m_form != SSA;
case GetLocal:
+ case GetArgument:
case SetLocal:
case SetArgument:
case Flush:
@@ -85,119 +55,6 @@ bool Node::hasVariableAccessData(Graph& graph)
}
}
-void Node::remove()
-{
- ASSERT(!(flags() & NodeHasVarArgs));
-
- children = children.justChecks();
-
- setOpAndDefaultFlags(Check);
-}
-
-void Node::convertToIdentity()
-{
- RELEASE_ASSERT(child1());
- RELEASE_ASSERT(!child2());
- NodeFlags result = canonicalResultRepresentation(this->result());
- setOpAndDefaultFlags(Identity);
- setResult(result);
-}
-
-void Node::convertToIdentityOn(Node* child)
-{
- children.reset();
- child1() = child->defaultEdge();
- NodeFlags output = canonicalResultRepresentation(this->result());
- NodeFlags input = canonicalResultRepresentation(child->result());
- if (output == input) {
- setOpAndDefaultFlags(Identity);
- setResult(output);
- return;
- }
- switch (output) {
- case NodeResultDouble:
- setOpAndDefaultFlags(DoubleRep);
- switch (input) {
- case NodeResultInt52:
- child1().setUseKind(Int52RepUse);
- return;
- case NodeResultJS:
- child1().setUseKind(NumberUse);
- return;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return;
- }
- case NodeResultInt52:
- setOpAndDefaultFlags(Int52Rep);
- switch (input) {
- case NodeResultDouble:
- child1().setUseKind(DoubleRepMachineIntUse);
- return;
- case NodeResultJS:
- child1().setUseKind(MachineIntUse);
- return;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return;
- }
- case NodeResultJS:
- setOpAndDefaultFlags(ValueRep);
- switch (input) {
- case NodeResultDouble:
- child1().setUseKind(DoubleRepUse);
- return;
- case NodeResultInt52:
- child1().setUseKind(Int52RepUse);
- return;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return;
- }
-}
-
-void Node::convertToPutHint(const PromotedLocationDescriptor& descriptor, Node* base, Node* value)
-{
- m_op = PutHint;
- m_opInfo = descriptor.imm1().m_value;
- m_opInfo2 = descriptor.imm2().m_value;
- child1() = base->defaultEdge();
- child2() = value->defaultEdge();
- child3() = Edge();
-}
-
-void Node::convertToPutStructureHint(Node* structure)
-{
- ASSERT(m_op == PutStructure);
- ASSERT(structure->castConstant<Structure*>() == transition()->next);
- convertToPutHint(StructurePLoc, child1().node(), structure);
-}
-
-void Node::convertToPutByOffsetHint()
-{
- ASSERT(m_op == PutByOffset);
- convertToPutHint(
- PromotedLocationDescriptor(NamedPropertyPLoc, storageAccessData().identifierNumber),
- child2().node(), child3().node());
-}
-
-void Node::convertToPutClosureVarHint()
-{
- ASSERT(m_op == PutClosureVar);
- convertToPutHint(
- PromotedLocationDescriptor(ClosureVarPLoc, scopeOffset().offset()),
- child1().node(), child2().node());
-}
-
-PromotedLocationDescriptor Node::promotedLocationDescriptor()
-{
- return PromotedLocationDescriptor(static_cast<PromotedLocationKind>(m_opInfo), m_opInfo2);
-}
-
} } // namespace JSC::DFG
namespace WTF {
@@ -217,9 +74,6 @@ void printInternal(PrintStream& out, SwitchKind kind)
case SwitchString:
out.print("SwitchString");
return;
- case SwitchCell:
- out.print("SwitchCell");
- return;
}
RELEASE_ASSERT_NOT_REACHED();
}
@@ -231,10 +85,7 @@ void printInternal(PrintStream& out, Node* node)
return;
}
out.print("@", node->index());
- if (node->hasDoubleResult())
- out.print("<Double>");
- else if (node->hasInt52Result())
- out.print("<Int52>");
+ out.print(AbbreviatedSpeculationDump(node->prediction()));
}
} // namespace WTF
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
index 83fcf5c10..55a9ede8d 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.h
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,52 +26,44 @@
#ifndef DFGNode_h
#define DFGNode_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include "BasicBlockLocation.h"
#include "CodeBlock.h"
+#include "CodeOrigin.h"
#include "DFGAbstractValue.h"
#include "DFGAdjacencyList.h"
#include "DFGArithMode.h"
#include "DFGArrayMode.h"
#include "DFGCommon.h"
-#include "DFGEpoch.h"
#include "DFGLazyJSValue.h"
-#include "DFGMultiGetByOffsetData.h"
#include "DFGNodeFlags.h"
-#include "DFGNodeOrigin.h"
#include "DFGNodeType.h"
-#include "DFGObjectMaterializationData.h"
-#include "DFGTransition.h"
-#include "DFGUseKind.h"
#include "DFGVariableAccessData.h"
-#include "GetByIdVariant.h"
#include "JSCJSValue.h"
#include "Operands.h"
-#include "PutByIdVariant.h"
#include "SpeculatedType.h"
#include "StructureSet.h"
-#include "TypeLocation.h"
#include "ValueProfile.h"
#include <wtf/ListDump.h>
namespace JSC { namespace DFG {
class Graph;
-class PromotedLocationDescriptor;
struct BasicBlock;
-struct StorageAccessData {
- PropertyOffset offset;
- unsigned identifierNumber;
-};
-
-struct MultiPutByOffsetData {
- unsigned identifierNumber;
- Vector<PutByIdVariant, 2> variants;
+struct StructureTransitionData {
+ Structure* previousStructure;
+ Structure* newStructure;
+
+ StructureTransitionData() { }
- bool writesStructures() const;
- bool reallocatesStorage() const;
+ StructureTransitionData(Structure* previousStructure, Structure* newStructure)
+ : previousStructure(previousStructure)
+ , newStructure(newStructure)
+ {
+ }
};
struct NewArrayBufferData {
@@ -80,55 +72,6 @@ struct NewArrayBufferData {
IndexingType indexingType;
};
-struct BranchTarget {
- BranchTarget()
- : block(0)
- , count(PNaN)
- {
- }
-
- explicit BranchTarget(BasicBlock* block)
- : block(block)
- , count(PNaN)
- {
- }
-
- void setBytecodeIndex(unsigned bytecodeIndex)
- {
- block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex));
- }
- unsigned bytecodeIndex() const { return bitwise_cast<uintptr_t>(block); }
-
- void dump(PrintStream&) const;
-
- BasicBlock* block;
- float count;
-};
-
-struct BranchData {
- static BranchData withBytecodeIndices(
- unsigned takenBytecodeIndex, unsigned notTakenBytecodeIndex)
- {
- BranchData result;
- result.taken.block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(takenBytecodeIndex));
- result.notTaken.block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(notTakenBytecodeIndex));
- return result;
- }
-
- unsigned takenBytecodeIndex() const { return taken.bytecodeIndex(); }
- unsigned notTakenBytecodeIndex() const { return notTaken.bytecodeIndex(); }
-
- BasicBlock*& forCondition(bool condition)
- {
- if (condition)
- return taken.block;
- return notTaken.block;
- }
-
- BranchTarget taken;
- BranchTarget notTaken;
-};
-
// The SwitchData and associated data structures duplicate the information in
// JumpTable. The DFG may ultimately end up using the JumpTable, though it may
// instead decide to do something different - this is entirely up to the DFG.
@@ -142,6 +85,7 @@ struct BranchData {
// values.
struct SwitchCase {
SwitchCase()
+ : target(0)
{
}
@@ -155,12 +99,20 @@ struct SwitchCase {
{
SwitchCase result;
result.value = value;
- result.target.setBytecodeIndex(bytecodeIndex);
+ result.target = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex));
return result;
}
+ unsigned targetBytecodeIndex() const { return bitwise_cast<uintptr_t>(target); }
+
LazyJSValue value;
- BranchTarget target;
+ BasicBlock* target;
+};
+
+enum SwitchKind {
+ SwitchImm,
+ SwitchChar,
+ SwitchString
};
struct SwitchData {
@@ -168,57 +120,30 @@ struct SwitchData {
// constructing this should make sure to initialize everything they
// care about manually.
SwitchData()
- : kind(static_cast<SwitchKind>(-1))
+ : fallThrough(0)
+ , kind(static_cast<SwitchKind>(-1))
, switchTableIndex(UINT_MAX)
, didUseJumpTable(false)
{
}
+ void setFallThroughBytecodeIndex(unsigned bytecodeIndex)
+ {
+ fallThrough = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex));
+ }
+ unsigned fallThroughBytecodeIndex() const { return bitwise_cast<uintptr_t>(fallThrough); }
+
Vector<SwitchCase> cases;
- BranchTarget fallThrough;
+ BasicBlock* fallThrough;
SwitchKind kind;
unsigned switchTableIndex;
bool didUseJumpTable;
};
-struct CallVarargsData {
- int firstVarArgOffset;
-};
-
-struct LoadVarargsData {
- VirtualRegister start; // Local for the first element. This is the first actual argument, not this.
- VirtualRegister count; // Local for the count.
- VirtualRegister machineStart;
- VirtualRegister machineCount;
- unsigned offset; // Which array element to start with. Usually this is 0.
- unsigned mandatoryMinimum; // The number of elements on the stack that must be initialized; if the array is too short then the missing elements must get undefined. Does not include "this".
- unsigned limit; // Maximum number of elements to load. Includes "this".
-};
-
-struct StackAccessData {
- StackAccessData()
- : format(DeadFlush)
- {
- }
-
- StackAccessData(VirtualRegister local, FlushFormat format)
- : local(local)
- , format(format)
- {
- }
-
- VirtualRegister local;
- VirtualRegister machineLocal;
- FlushFormat format;
-
- FlushedAt flushedAt() { return FlushedAt(format, machineLocal); }
-};
-
// This type used in passing an immediate argument to Node constructor;
// distinguishes an immediate value (typically an index into a CodeBlock data structure -
// a constant index, argument, or identifier) from a Node*.
struct OpInfo {
- OpInfo() : m_value(0) { }
explicit OpInfo(int32_t value) : m_value(static_cast<uintptr_t>(value)) { }
explicit OpInfo(uint32_t value) : m_value(static_cast<uintptr_t>(value)) { }
#if OS(DARWIN) || USE(JSVALUE64)
@@ -236,112 +161,78 @@ struct Node {
Node() { }
- Node(NodeType op, NodeOrigin nodeOrigin, const AdjacencyList& children)
- : origin(nodeOrigin)
+ Node(NodeType op, CodeOrigin codeOrigin, const AdjacencyList& children)
+ : codeOrigin(codeOrigin)
+ , codeOriginForExitTarget(codeOrigin)
, children(children)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
- , owner(nullptr)
{
- m_misc.replacement = nullptr;
+ misc.replacement = 0;
setOpAndDefaultFlags(op);
}
// Construct a node with up to 3 children, no immediate value.
- Node(NodeType op, NodeOrigin nodeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
- : origin(nodeOrigin)
+ Node(NodeType op, CodeOrigin codeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
+ : codeOrigin(codeOrigin)
+ , codeOriginForExitTarget(codeOrigin)
, children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
, m_opInfo(0)
, m_opInfo2(0)
- , owner(nullptr)
{
- m_misc.replacement = nullptr;
+ misc.replacement = 0;
setOpAndDefaultFlags(op);
ASSERT(!(m_flags & NodeHasVarArgs));
}
- // Construct a node with up to 3 children, no immediate value.
- Node(NodeFlags result, NodeType op, NodeOrigin nodeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
- : origin(nodeOrigin)
- , children(AdjacencyList::Fixed, child1, child2, child3)
- , m_virtualRegister(VirtualRegister())
- , m_refCount(1)
- , m_prediction(SpecNone)
- , m_opInfo(0)
- , m_opInfo2(0)
- , owner(nullptr)
- {
- m_misc.replacement = nullptr;
- setOpAndDefaultFlags(op);
- setResult(result);
- ASSERT(!(m_flags & NodeHasVarArgs));
- }
-
// Construct a node with up to 3 children and an immediate value.
- Node(NodeType op, NodeOrigin nodeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
- : origin(nodeOrigin)
+ Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
+ : codeOrigin(codeOrigin)
+ , codeOriginForExitTarget(codeOrigin)
, children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
, m_opInfo(imm.m_value)
, m_opInfo2(0)
- , owner(nullptr)
{
- m_misc.replacement = nullptr;
+ misc.replacement = 0;
setOpAndDefaultFlags(op);
ASSERT(!(m_flags & NodeHasVarArgs));
}
- // Construct a node with up to 3 children and an immediate value.
- Node(NodeFlags result, NodeType op, NodeOrigin nodeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
- : origin(nodeOrigin)
- , children(AdjacencyList::Fixed, child1, child2, child3)
- , m_virtualRegister(VirtualRegister())
- , m_refCount(1)
- , m_prediction(SpecNone)
- , m_opInfo(imm.m_value)
- , m_opInfo2(0)
- , owner(nullptr)
- {
- m_misc.replacement = nullptr;
- setOpAndDefaultFlags(op);
- setResult(result);
- ASSERT(!(m_flags & NodeHasVarArgs));
- }
-
// Construct a node with up to 3 children and two immediate values.
- Node(NodeType op, NodeOrigin nodeOrigin, OpInfo imm1, OpInfo imm2, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
- : origin(nodeOrigin)
+ Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
+ : codeOrigin(codeOrigin)
+ , codeOriginForExitTarget(codeOrigin)
, children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
, m_opInfo(imm1.m_value)
, m_opInfo2(imm2.m_value)
- , owner(nullptr)
{
- m_misc.replacement = nullptr;
+ misc.replacement = 0;
setOpAndDefaultFlags(op);
ASSERT(!(m_flags & NodeHasVarArgs));
}
// Construct a node with a variable number of children and two immediate values.
- Node(VarArgTag, NodeType op, NodeOrigin nodeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren)
- : origin(nodeOrigin)
+ Node(VarArgTag, NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren)
+ : codeOrigin(codeOrigin)
+ , codeOriginForExitTarget(codeOrigin)
, children(AdjacencyList::Variable, firstChild, numChildren)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
, m_opInfo(imm1.m_value)
, m_opInfo2(imm2.m_value)
- , owner(nullptr)
{
- m_misc.replacement = nullptr;
+ misc.replacement = 0;
setOpAndDefaultFlags(op);
ASSERT(m_flags & NodeHasVarArgs);
}
@@ -364,6 +255,7 @@ struct Node {
bool mergeFlags(NodeFlags flags)
{
+ ASSERT(!(flags & NodeDoesNotExit));
NodeFlags newFlags = m_flags | flags;
if (newFlags == m_flags)
return false;
@@ -373,6 +265,7 @@ struct Node {
bool filterFlags(NodeFlags flags)
{
+ ASSERT(flags & NodeDoesNotExit);
NodeFlags newFlags = m_flags & flags;
if (newFlags == m_flags)
return false;
@@ -385,108 +278,104 @@ struct Node {
return filterFlags(~flags);
}
- void setResult(NodeFlags result)
- {
- ASSERT(!(result & ~NodeResultMask));
- clearFlags(NodeResultMask);
- mergeFlags(result);
- }
-
- NodeFlags result() const
- {
- return flags() & NodeResultMask;
- }
-
void setOpAndDefaultFlags(NodeType op)
{
m_op = op;
m_flags = defaultFlags(op);
}
- void remove();
-
- void convertToCheckStructure(StructureSet* set)
+ void convertToPhantom()
{
- setOpAndDefaultFlags(CheckStructure);
- m_opInfo = bitwise_cast<uintptr_t>(set);
+ setOpAndDefaultFlags(Phantom);
}
- void convertToCheckStructureImmediate(Node* structure)
+ void convertToPhantomUnchecked()
{
- ASSERT(op() == CheckStructure);
- m_op = CheckStructureImmediate;
- children.setChild1(Edge(structure, CellUse));
+ setOpAndDefaultFlags(Phantom);
}
-
- void replaceWith(Node* other)
+
+ void convertToIdentity()
{
- remove();
- setReplacement(other);
+ RELEASE_ASSERT(child1());
+ RELEASE_ASSERT(!child2());
+ setOpAndDefaultFlags(Identity);
}
- void convertToIdentity();
- void convertToIdentityOn(Node*);
-
bool mustGenerate()
{
return m_flags & NodeMustGenerate;
}
+ void setCanExit(bool exits)
+ {
+ if (exits)
+ m_flags &= ~NodeDoesNotExit;
+ else
+ m_flags |= NodeDoesNotExit;
+ }
+
+ bool canExit()
+ {
+ return !(m_flags & NodeDoesNotExit);
+ }
+
bool isConstant()
{
- switch (op()) {
- case JSConstant:
- case DoubleConstant:
- case Int52Constant:
- return true;
- default:
- return false;
- }
+ return op() == JSConstant;
+ }
+
+ bool isWeakConstant()
+ {
+ return op() == WeakJSConstant;
+ }
+
+ bool isStronglyProvedConstantIn(InlineCallFrame* inlineCallFrame)
+ {
+ return !!(flags() & NodeIsStaticConstant)
+ && codeOrigin.inlineCallFrame == inlineCallFrame;
+ }
+
+ bool isStronglyProvedConstantIn(const CodeOrigin& codeOrigin)
+ {
+ return isStronglyProvedConstantIn(codeOrigin.inlineCallFrame);
+ }
+
+ bool isPhantomArguments()
+ {
+ return op() == PhantomArguments;
}
bool hasConstant()
{
switch (op()) {
case JSConstant:
- case DoubleConstant:
- case Int52Constant:
- return true;
-
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- // These pretend to be the empty value constant for the benefit of the DFG backend, which
- // otherwise wouldn't take kindly to a node that doesn't compute a value.
+ case WeakJSConstant:
+ case PhantomArguments:
return true;
-
default:
return false;
}
}
- FrozenValue* constant()
+ unsigned constantNumber()
{
- ASSERT(hasConstant());
-
- if (op() == PhantomDirectArguments || op() == PhantomClonedArguments) {
- // These pretend to be the empty value constant for the benefit of the DFG backend, which
- // otherwise wouldn't take kindly to a node that doesn't compute a value.
- return FrozenValue::emptySingleton();
- }
-
- return bitwise_cast<FrozenValue*>(m_opInfo);
+ ASSERT(isConstant());
+ return m_opInfo;
}
- // Don't call this directly - use Graph::convertToConstant() instead!
- void convertToConstant(FrozenValue* value)
+ void convertToConstant(unsigned constantNumber)
{
- if (hasDoubleResult())
- m_op = DoubleConstant;
- else if (hasInt52Result())
- m_op = Int52Constant;
- else
- m_op = JSConstant;
- m_flags &= ~NodeMustGenerate;
- m_opInfo = bitwise_cast<uintptr_t>(value);
+ m_op = JSConstant;
+ m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld);
+ m_opInfo = constantNumber;
+ children.reset();
+ }
+
+ void convertToWeakConstant(JSCell* cell)
+ {
+ m_op = WeakJSConstant;
+ m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld);
+ m_opInfo = bitwise_cast<uintptr_t>(cell);
children.reset();
}
@@ -495,111 +384,54 @@ struct Node {
ASSERT(op() == GetIndexedPropertyStorage);
m_op = ConstantStoragePointer;
m_opInfo = bitwise_cast<uintptr_t>(pointer);
- children.reset();
}
void convertToGetLocalUnlinked(VirtualRegister local)
{
m_op = GetLocalUnlinked;
- m_flags &= ~NodeMustGenerate;
+ m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld);
m_opInfo = local.offset();
m_opInfo2 = VirtualRegister().offset();
children.reset();
}
- void convertToPutStack(StackAccessData* data)
+ void convertToStructureTransitionWatchpoint(Structure* structure)
{
- m_op = PutStack;
- m_flags |= NodeMustGenerate;
- m_opInfo = bitwise_cast<uintptr_t>(data);
- m_opInfo2 = 0;
+ ASSERT(m_op == CheckStructure || m_op == ArrayifyToStructure);
+ ASSERT(!child2());
+ ASSERT(!child3());
+ m_opInfo = bitwise_cast<uintptr_t>(structure);
+ m_op = StructureTransitionWatchpoint;
}
- void convertToGetStack(StackAccessData* data)
+ void convertToStructureTransitionWatchpoint()
{
- m_op = GetStack;
- m_flags &= ~NodeMustGenerate;
- m_opInfo = bitwise_cast<uintptr_t>(data);
- m_opInfo2 = 0;
- children.reset();
+ convertToStructureTransitionWatchpoint(structureSet().singletonStructure());
}
- void convertToGetByOffset(StorageAccessData& data, Edge storage)
+ void convertToGetByOffset(unsigned storageAccessDataIndex, Edge storage)
{
- ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == MultiGetByOffset);
- m_opInfo = bitwise_cast<uintptr_t>(&data);
+ ASSERT(m_op == GetById || m_op == GetByIdFlush);
+ m_opInfo = storageAccessDataIndex;
children.setChild2(children.child1());
children.child2().setUseKind(KnownCellUse);
children.setChild1(storage);
m_op = GetByOffset;
- m_flags &= ~NodeMustGenerate;
+ m_flags &= ~NodeClobbersWorld;
}
- void convertToMultiGetByOffset(MultiGetByOffsetData* data)
+ void convertToPutByOffset(unsigned storageAccessDataIndex, Edge storage)
{
- ASSERT(m_op == GetById || m_op == GetByIdFlush);
- m_opInfo = bitwise_cast<intptr_t>(data);
- child1().setUseKind(CellUse);
- m_op = MultiGetByOffset;
- ASSERT(m_flags & NodeMustGenerate);
- }
-
- void convertToPutByOffset(StorageAccessData& data, Edge storage)
- {
- ASSERT(m_op == PutById || m_op == PutByIdDirect || m_op == PutByIdFlush || m_op == MultiPutByOffset);
- m_opInfo = bitwise_cast<uintptr_t>(&data);
+ ASSERT(m_op == PutById || m_op == PutByIdDirect);
+ m_opInfo = storageAccessDataIndex;
children.setChild3(children.child2());
children.setChild2(children.child1());
children.setChild1(storage);
m_op = PutByOffset;
+ m_flags &= ~NodeClobbersWorld;
}
- void convertToMultiPutByOffset(MultiPutByOffsetData* data)
- {
- ASSERT(m_op == PutById || m_op == PutByIdDirect || m_op == PutByIdFlush);
- m_opInfo = bitwise_cast<intptr_t>(data);
- m_op = MultiPutByOffset;
- }
-
- void convertToPutHint(const PromotedLocationDescriptor&, Node* base, Node* value);
-
- void convertToPutByOffsetHint();
- void convertToPutStructureHint(Node* structure);
- void convertToPutClosureVarHint();
-
- void convertToPhantomNewObject()
- {
- ASSERT(m_op == NewObject || m_op == MaterializeNewObject);
- m_op = PhantomNewObject;
- m_flags &= ~NodeHasVarArgs;
- m_flags |= NodeMustGenerate;
- m_opInfo = 0;
- m_opInfo2 = 0;
- children = AdjacencyList();
- }
-
- void convertToPhantomNewFunction()
- {
- ASSERT(m_op == NewFunction);
- m_op = PhantomNewFunction;
- m_flags |= NodeMustGenerate;
- m_opInfo = 0;
- m_opInfo2 = 0;
- children = AdjacencyList();
- }
-
- void convertToPhantomCreateActivation()
- {
- ASSERT(m_op == CreateActivation || m_op == MaterializeCreateActivation);
- m_op = PhantomCreateActivation;
- m_flags &= ~NodeHasVarArgs;
- m_flags |= NodeMustGenerate;
- m_opInfo = 0;
- m_opInfo2 = 0;
- children = AdjacencyList();
- }
-
- void convertPhantomToPhantomLocal()
+ void convertToPhantomLocal()
{
ASSERT(m_op == Phantom && (child1()->op() == Phi || child1()->op() == SetLocal || child1()->op() == SetArgument));
m_op = PhantomLocal;
@@ -607,13 +439,6 @@ struct Node {
children.setChild1(Edge());
}
- void convertFlushToPhantomLocal()
- {
- ASSERT(m_op == Flush);
- m_op = PhantomLocal;
- children = AdjacencyList();
- }
-
void convertToGetLocal(VariableAccessData* variable, Node* phi)
{
ASSERT(m_op == GetLocalUnlinked);
@@ -628,101 +453,53 @@ struct Node {
ASSERT(m_op == ToPrimitive);
m_op = ToString;
}
-
- void convertToArithSqrt()
- {
- ASSERT(m_op == ArithPow);
- child2() = Edge();
- m_op = ArithSqrt;
- }
- JSValue asJSValue()
- {
- return constant()->value();
- }
-
- bool isInt32Constant()
- {
- return isConstant() && constant()->value().isInt32();
- }
-
- int32_t asInt32()
- {
- return asJSValue().asInt32();
- }
-
- uint32_t asUInt32()
- {
- return asInt32();
- }
-
- bool isDoubleConstant()
- {
- return isConstant() && constant()->value().isDouble();
- }
-
- bool isNumberConstant()
+ JSCell* weakConstant()
{
- return isConstant() && constant()->value().isNumber();
+ ASSERT(op() == WeakJSConstant);
+ return bitwise_cast<JSCell*>(m_opInfo);
}
- double asNumber()
+ JSValue valueOfJSConstant(CodeBlock* codeBlock)
{
- return asJSValue().asNumber();
- }
-
- bool isMachineIntConstant()
- {
- return isConstant() && constant()->value().isMachineInt();
- }
-
- int64_t asMachineInt()
- {
- return asJSValue().asMachineInt();
- }
-
- bool isBooleanConstant()
- {
- return isConstant() && constant()->value().isBoolean();
- }
-
- bool asBoolean()
- {
- return constant()->value().asBoolean();
- }
-
- bool isCellConstant()
- {
- return isConstant() && constant()->value() && constant()->value().isCell();
+ switch (op()) {
+ case WeakJSConstant:
+ return JSValue(weakConstant());
+ case JSConstant:
+ return codeBlock->constantRegister(FirstConstantRegisterIndex + constantNumber()).get();
+ case PhantomArguments:
+ return JSValue();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return JSValue(); // Have to return something in release mode.
+ }
}
-
- JSCell* asCell()
+
+ bool isInt32Constant(CodeBlock* codeBlock)
{
- return constant()->value().asCell();
+ return isConstant() && valueOfJSConstant(codeBlock).isInt32();
}
-
- template<typename T>
- T dynamicCastConstant()
+
+ bool isDoubleConstant(CodeBlock* codeBlock)
{
- if (!isCellConstant())
- return nullptr;
- return jsDynamicCast<T>(asCell());
+ bool result = isConstant() && valueOfJSConstant(codeBlock).isDouble();
+ if (result)
+ ASSERT(!isInt32Constant(codeBlock));
+ return result;
}
- template<typename T>
- T castConstant()
+ bool isNumberConstant(CodeBlock* codeBlock)
{
- T result = dynamicCastConstant<T>();
- RELEASE_ASSERT(result);
+ bool result = isConstant() && valueOfJSConstant(codeBlock).isNumber();
+ ASSERT(result == (isInt32Constant(codeBlock) || isDoubleConstant(codeBlock)));
return result;
}
-
- JSValue initializationValueForActivation() const
+
+ bool isBooleanConstant(CodeBlock* codeBlock)
{
- ASSERT(op() == CreateActivation);
- return bitwise_cast<FrozenValue*>(m_opInfo2)->value();
+ return isConstant() && valueOfJSConstant(codeBlock).isBoolean();
}
-
+
bool containsMovHint()
{
switch (op()) {
@@ -740,16 +517,6 @@ struct Node {
return hasVariableAccessData(graph);
}
- // This is useful for debugging code, where a node that should have a variable
- // access data doesn't have one because it hasn't been initialized yet.
- VariableAccessData* tryGetVariableAccessData()
- {
- VariableAccessData* result = reinterpret_cast<VariableAccessData*>(m_opInfo);
- if (!result)
- return 0;
- return result->find();
- }
-
VariableAccessData* variableAccessData()
{
return reinterpret_cast<VariableAccessData*>(m_opInfo)->find();
@@ -772,7 +539,6 @@ struct Node {
case ExtractOSREntryLocal:
case MovHint:
case ZombieHint:
- case KillStack:
return true;
default:
return false;
@@ -802,23 +568,6 @@ struct Node {
return VirtualRegister(m_opInfo2);
}
- bool hasStackAccessData()
- {
- switch (op()) {
- case PutStack:
- case GetStack:
- return true;
- default:
- return false;
- }
- }
-
- StackAccessData* stackAccessData()
- {
- ASSERT(hasStackAccessData());
- return bitwise_cast<StackAccessData*>(m_opInfo);
- }
-
bool hasPhi()
{
return op() == Upsilon;
@@ -832,7 +581,14 @@ struct Node {
bool isStoreBarrier()
{
- return op() == StoreBarrier;
+ switch (op()) {
+ case StoreBarrier:
+ case ConditionalStoreBarrier:
+ case StoreBarrierWithNullCheck:
+ return true;
+ default:
+ return false;
+ }
}
bool hasIdentifier()
@@ -841,7 +597,6 @@ struct Node {
case GetById:
case GetByIdFlush:
case PutById:
- case PutByIdFlush:
case PutByIdDirect:
return true;
default:
@@ -855,20 +610,33 @@ struct Node {
return m_opInfo;
}
- bool hasPromotedLocationDescriptor()
+ bool hasArithNodeFlags()
{
- return op() == PutHint;
+ switch (op()) {
+ case UInt32ToNumber:
+ case ArithAdd:
+ case ArithSub:
+ case ArithNegate:
+ case ArithMul:
+ case ArithAbs:
+ case ArithMin:
+ case ArithMax:
+ case ArithMod:
+ case ArithDiv:
+ case ValueAdd:
+ return true;
+ default:
+ return false;
+ }
}
- PromotedLocationDescriptor promotedLocationDescriptor();
-
// This corrects the arithmetic node flags, so that irrelevant bits are
// ignored. In particular, anything other than ArithMul does not need
// to know if it can speculate on negative zero.
NodeFlags arithNodeFlags()
{
NodeFlags result = m_flags & NodeArithFlagsMask;
- if (op() == ArithMul || op() == ArithDiv || op() == ArithMod || op() == ArithNegate || op() == ArithPow || op() == ArithRound || op() == DoubleAsInt32)
+ if (op() == ArithMul || op() == ArithDiv || op() == ArithMod || op() == ArithNegate || op() == DoubleAsInt32)
return result;
return result & ~NodeBytecodeNeedsNegZero;
}
@@ -960,26 +728,15 @@ struct Node {
return m_opInfo;
}
- bool hasScopeOffset()
+ bool hasVarNumber()
{
return op() == GetClosureVar || op() == PutClosureVar;
}
- ScopeOffset scopeOffset()
- {
- ASSERT(hasScopeOffset());
- return ScopeOffset(m_opInfo);
- }
-
- bool hasDirectArgumentsOffset()
+ int varNumber()
{
- return op() == GetFromArguments || op() == PutToArguments;
- }
-
- DirectArgumentsOffset capturedArgumentsOffset()
- {
- ASSERT(hasDirectArgumentsOffset());
- return DirectArgumentsOffset(m_opInfo);
+ ASSERT(hasVarNumber());
+ return m_opInfo;
}
bool hasRegisterPointer()
@@ -987,89 +744,39 @@ struct Node {
return op() == GetGlobalVar || op() == PutGlobalVar;
}
- WriteBarrier<Unknown>* variablePointer()
+ WriteBarrier<Unknown>* registerPointer()
{
return bitwise_cast<WriteBarrier<Unknown>*>(m_opInfo);
}
- bool hasCallVarargsData()
- {
- switch (op()) {
- case CallVarargs:
- case CallForwardVarargs:
- case ConstructVarargs:
- case ConstructForwardVarargs:
- return true;
- default:
- return false;
- }
- }
-
- CallVarargsData* callVarargsData()
- {
- ASSERT(hasCallVarargsData());
- return bitwise_cast<CallVarargsData*>(m_opInfo);
- }
-
- bool hasLoadVarargsData()
- {
- return op() == LoadVarargs || op() == ForwardVarargs;
- }
-
- LoadVarargsData* loadVarargsData()
- {
- ASSERT(hasLoadVarargsData());
- return bitwise_cast<LoadVarargsData*>(m_opInfo);
- }
-
bool hasResult()
{
- return !!result();
+ return m_flags & NodeResultMask;
}
bool hasInt32Result()
{
- return result() == NodeResultInt32;
- }
-
- bool hasInt52Result()
- {
- return result() == NodeResultInt52;
+ return (m_flags & NodeResultMask) == NodeResultInt32;
}
bool hasNumberResult()
{
- return result() == NodeResultNumber;
- }
-
- bool hasDoubleResult()
- {
- return result() == NodeResultDouble;
+ return (m_flags & NodeResultMask) == NodeResultNumber;
}
bool hasJSResult()
{
- return result() == NodeResultJS;
+ return (m_flags & NodeResultMask) == NodeResultJS;
}
bool hasBooleanResult()
{
- return result() == NodeResultBoolean;
+ return (m_flags & NodeResultMask) == NodeResultBoolean;
}
bool hasStorageResult()
{
- return result() == NodeResultStorage;
- }
-
- UseKind defaultUseKind()
- {
- return useKindForResult(result());
- }
-
- Edge defaultEdge()
- {
- return Edge(this, defaultUseKind());
+ return (m_flags & NodeResultMask) == NodeResultStorage;
}
bool isJump()
@@ -1101,22 +808,40 @@ struct Node {
}
}
- unsigned targetBytecodeOffsetDuringParsing()
+ unsigned takenBytecodeOffsetDuringParsing()
{
- ASSERT(isJump());
+ ASSERT(isBranch() || isJump());
return m_opInfo;
}
- BasicBlock*& targetBlock()
+ unsigned notTakenBytecodeOffsetDuringParsing()
+ {
+ ASSERT(isBranch());
+ return m_opInfo2;
+ }
+
+ void setTakenBlock(BasicBlock* block)
+ {
+ ASSERT(isBranch() || isJump());
+ m_opInfo = bitwise_cast<uintptr_t>(block);
+ }
+
+ void setNotTakenBlock(BasicBlock* block)
+ {
+ ASSERT(isBranch());
+ m_opInfo2 = bitwise_cast<uintptr_t>(block);
+ }
+
+ BasicBlock*& takenBlock()
{
- ASSERT(isJump());
+ ASSERT(isBranch() || isJump());
return *bitwise_cast<BasicBlock**>(&m_opInfo);
}
- BranchData* branchData()
+ BasicBlock*& notTakenBlock()
{
ASSERT(isBranch());
- return bitwise_cast<BranchData*>(m_opInfo);
+ return *bitwise_cast<BasicBlock**>(&m_opInfo2);
}
SwitchData* switchData()
@@ -1143,115 +868,39 @@ struct Node {
{
if (isSwitch()) {
if (index < switchData()->cases.size())
- return switchData()->cases[index].target.block;
+ return switchData()->cases[index].target;
RELEASE_ASSERT(index == switchData()->cases.size());
- return switchData()->fallThrough.block;
+ return switchData()->fallThrough;
}
switch (index) {
case 0:
- if (isJump())
- return targetBlock();
- return branchData()->taken.block;
+ return takenBlock();
case 1:
- return branchData()->notTaken.block;
+ return notTakenBlock();
default:
RELEASE_ASSERT_NOT_REACHED();
- return targetBlock();
+ return takenBlock();
}
}
- class SuccessorsIterable {
- public:
- SuccessorsIterable()
- : m_terminal(nullptr)
- {
- }
-
- SuccessorsIterable(Node* terminal)
- : m_terminal(terminal)
- {
- }
-
- class iterator {
- public:
- iterator()
- : m_terminal(nullptr)
- , m_index(UINT_MAX)
- {
- }
-
- iterator(Node* terminal, unsigned index)
- : m_terminal(terminal)
- , m_index(index)
- {
- }
-
- BasicBlock* operator*()
- {
- return m_terminal->successor(m_index);
- }
-
- iterator& operator++()
- {
- m_index++;
- return *this;
- }
-
- bool operator==(const iterator& other) const
- {
- return m_index == other.m_index;
- }
-
- bool operator!=(const iterator& other) const
- {
- return !(*this == other);
- }
- private:
- Node* m_terminal;
- unsigned m_index;
- };
-
- iterator begin()
- {
- return iterator(m_terminal, 0);
- }
-
- iterator end()
- {
- return iterator(m_terminal, m_terminal->numSuccessors());
- }
-
- private:
- Node* m_terminal;
- };
-
- SuccessorsIterable successors()
- {
- return SuccessorsIterable(this);
- }
-
BasicBlock*& successorForCondition(bool condition)
{
- return branchData()->forCondition(condition);
+ ASSERT(isBranch());
+ return condition ? takenBlock() : notTakenBlock();
}
bool hasHeapPrediction()
{
switch (op()) {
- case ArithRound:
- case GetDirectPname:
case GetById:
case GetByIdFlush:
case GetByVal:
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValSafe:
case Call:
case Construct:
- case CallVarargs:
- case ConstructVarargs:
- case CallForwardVarargs:
case GetByOffset:
- case MultiGetByOffset:
case GetClosureVar:
- case GetFromArguments:
case ArrayPop:
case ArrayPush:
case RegExpExec:
@@ -1268,81 +917,78 @@ struct Node {
ASSERT(hasHeapPrediction());
return static_cast<SpeculatedType>(m_opInfo2);
}
-
- void setHeapPrediction(SpeculatedType prediction)
+
+ bool predictHeap(SpeculatedType prediction)
{
ASSERT(hasHeapPrediction());
- m_opInfo2 = prediction;
+
+ return mergeSpeculation(m_opInfo2, prediction);
}
- bool hasCellOperand()
+ bool hasFunction()
{
switch (op()) {
- case CheckCell:
- case NewFunction:
- case CreateActivation:
- case MaterializeCreateActivation:
+ case CheckFunction:
+ case AllocationProfileWatchpoint:
return true;
default:
return false;
}
}
- FrozenValue* cellOperand()
+ JSCell* function()
{
- ASSERT(hasCellOperand());
- return reinterpret_cast<FrozenValue*>(m_opInfo);
+ ASSERT(hasFunction());
+ JSCell* result = reinterpret_cast<JSFunction*>(m_opInfo);
+ ASSERT(JSValue(result).isFunction());
+ return result;
}
- template<typename T>
- T castOperand()
+ bool hasExecutable()
{
- return cellOperand()->cast<T>();
+ return op() == CheckExecutable;
}
- void setCellOperand(FrozenValue* value)
+ ExecutableBase* executable()
{
- ASSERT(hasCellOperand());
- m_opInfo = bitwise_cast<uintptr_t>(value);
+ return jsCast<ExecutableBase*>(reinterpret_cast<JSCell*>(m_opInfo));
}
- bool hasWatchpointSet()
+ bool hasVariableWatchpointSet()
{
- return op() == NotifyWrite;
+ return op() == NotifyWrite || op() == VariableWatchpoint;
}
- WatchpointSet* watchpointSet()
+ VariableWatchpointSet* variableWatchpointSet()
{
- ASSERT(hasWatchpointSet());
- return reinterpret_cast<WatchpointSet*>(m_opInfo);
+ return reinterpret_cast<VariableWatchpointSet*>(m_opInfo);
}
- bool hasStoragePointer()
+ bool hasTypedArray()
{
- return op() == ConstantStoragePointer;
+ return op() == TypedArrayWatchpoint;
}
- void* storagePointer()
+ JSArrayBufferView* typedArray()
{
- ASSERT(hasStoragePointer());
- return reinterpret_cast<void*>(m_opInfo);
+ return reinterpret_cast<JSArrayBufferView*>(m_opInfo);
}
-
- bool hasUidOperand()
+
+ bool hasStoragePointer()
{
- return op() == CheckIdent;
+ return op() == ConstantStoragePointer;
}
-
- UniquedStringImpl* uidOperand()
+
+ void* storagePointer()
{
- ASSERT(hasUidOperand());
- return reinterpret_cast<UniquedStringImpl*>(m_opInfo);
+ return reinterpret_cast<void*>(m_opInfo);
}
- bool hasTransition()
+ bool hasStructureTransitionData()
{
switch (op()) {
case PutStructure:
+ case PhantomPutStructure:
case AllocatePropertyStorage:
case ReallocatePropertyStorage:
return true;
@@ -1351,18 +997,16 @@ struct Node {
}
}
- Transition* transition()
+ StructureTransitionData& structureTransitionData()
{
- ASSERT(hasTransition());
- return reinterpret_cast<Transition*>(m_opInfo);
+ ASSERT(hasStructureTransitionData());
+ return *reinterpret_cast<StructureTransitionData*>(m_opInfo);
}
bool hasStructureSet()
{
switch (op()) {
case CheckStructure:
- case CheckStructureImmediate:
- case MaterializeNewObject:
return true;
default:
return false;
@@ -1378,6 +1022,7 @@ struct Node {
bool hasStructure()
{
switch (op()) {
+ case StructureTransitionWatchpoint:
case ArrayifyToStructure:
case NewObject:
case NewStringObject:
@@ -1395,136 +1040,47 @@ struct Node {
bool hasStorageAccessData()
{
- switch (op()) {
- case GetByOffset:
- case PutByOffset:
- case GetGetterSetterByOffset:
- return true;
- default:
- return false;
- }
+ return op() == GetByOffset || op() == PutByOffset;
}
- StorageAccessData& storageAccessData()
+ unsigned storageAccessDataIndex()
{
ASSERT(hasStorageAccessData());
- return *bitwise_cast<StorageAccessData*>(m_opInfo);
- }
-
- bool hasMultiGetByOffsetData()
- {
- return op() == MultiGetByOffset;
- }
-
- MultiGetByOffsetData& multiGetByOffsetData()
- {
- ASSERT(hasMultiGetByOffsetData());
- return *reinterpret_cast<MultiGetByOffsetData*>(m_opInfo);
+ return m_opInfo;
}
- bool hasMultiPutByOffsetData()
+ bool hasFunctionDeclIndex()
{
- return op() == MultiPutByOffset;
+ return op() == NewFunction
+ || op() == NewFunctionNoCheck;
}
- MultiPutByOffsetData& multiPutByOffsetData()
+ unsigned functionDeclIndex()
{
- ASSERT(hasMultiPutByOffsetData());
- return *reinterpret_cast<MultiPutByOffsetData*>(m_opInfo);
+ ASSERT(hasFunctionDeclIndex());
+ return m_opInfo;
}
- bool hasObjectMaterializationData()
+ bool hasFunctionExprIndex()
{
- switch (op()) {
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- return true;
-
- default:
- return false;
- }
+ return op() == NewFunctionExpression;
}
- ObjectMaterializationData& objectMaterializationData()
+ unsigned functionExprIndex()
{
- ASSERT(hasObjectMaterializationData());
- return *reinterpret_cast<ObjectMaterializationData*>(m_opInfo2);
- }
-
- bool isObjectAllocation()
- {
- switch (op()) {
- case NewObject:
- case MaterializeNewObject:
- return true;
- default:
- return false;
- }
+ ASSERT(hasFunctionExprIndex());
+ return m_opInfo;
}
- bool isPhantomObjectAllocation()
+ bool hasSymbolTable()
{
- switch (op()) {
- case PhantomNewObject:
- return true;
- default:
- return false;
- }
+ return op() == FunctionReentryWatchpoint;
}
- bool isActivationAllocation()
- {
- switch (op()) {
- case CreateActivation:
- case MaterializeCreateActivation:
- return true;
- default:
- return false;
- }
- }
-
- bool isPhantomActivationAllocation()
- {
- switch (op()) {
- case PhantomCreateActivation:
- return true;
- default:
- return false;
- }
- }
-
- bool isFunctionAllocation()
- {
- switch (op()) {
- case NewFunction:
- return true;
- default:
- return false;
- }
- }
-
- bool isPhantomFunctionAllocation()
+ SymbolTable* symbolTable()
{
- switch (op()) {
- case PhantomNewFunction:
- return true;
- default:
- return false;
- }
- }
-
- bool isPhantomAllocation()
- {
- switch (op()) {
- case PhantomNewObject:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- return true;
- default:
- return false;
- }
+ ASSERT(hasSymbolTable());
+ return reinterpret_cast<SymbolTable*>(m_opInfo);
}
bool hasArrayMode()
@@ -1543,7 +1099,6 @@ struct Node {
case ArrayifyToStructure:
case ArrayPush:
case ArrayPop:
- case HasIndexedProperty:
return true;
default:
return false;
@@ -1594,23 +1149,6 @@ struct Node {
{
m_opInfo = mode;
}
-
- bool hasArithRoundingMode()
- {
- return op() == ArithRound;
- }
-
- Arith::RoundingMode arithRoundingMode()
- {
- ASSERT(hasArithRoundingMode());
- return static_cast<Arith::RoundingMode>(m_opInfo);
- }
-
- void setArithRoundingMode(Arith::RoundingMode mode)
- {
- ASSERT(hasArithRoundingMode());
- m_opInfo = static_cast<uintptr_t>(mode);
- }
bool hasVirtualRegister()
{
@@ -1646,9 +1184,19 @@ struct Node {
return m_refCount;
}
- bool isSemanticallySkippable()
+ bool willHaveCodeGenOrOSR()
{
- return op() == CountExecution;
+ switch (op()) {
+ case SetLocal:
+ case MovHint:
+ case ZombieHint:
+ case PhantomArguments:
+ return true;
+ case Phantom:
+ return child1().useKindUnchecked() != UntypedUse || child2().useKindUnchecked() != UntypedUse || child3().useKindUnchecked() != UntypedUse;
+ default:
+ return shouldGenerate();
+ }
}
unsigned refCount()
@@ -1715,25 +1263,9 @@ struct Node {
return child1().useKind();
}
- bool isBinaryUseKind(UseKind left, UseKind right)
- {
- return child1().useKind() == left && child2().useKind() == right;
- }
-
bool isBinaryUseKind(UseKind useKind)
{
- return isBinaryUseKind(useKind, useKind);
- }
-
- Edge childFor(UseKind useKind)
- {
- if (child1().useKind() == useKind)
- return child1();
- if (child2().useKind() == useKind)
- return child2();
- if (child3().useKind() == useKind)
- return child3();
- return Edge();
+ return child1().useKind() == useKind && child2().useKind() == useKind;
}
SpeculatedType prediction()
@@ -1751,34 +1283,29 @@ struct Node {
return isInt32Speculation(prediction());
}
- bool sawBooleans()
+ bool shouldSpeculateInt32ForArithmetic()
{
- return !!(prediction() & SpecBoolean);
+ return isInt32SpeculationForArithmetic(prediction());
}
- bool shouldSpeculateInt32OrBoolean()
+ bool shouldSpeculateInt32ExpectingDefined()
{
- return isInt32OrBooleanSpeculation(prediction());
+ return isInt32SpeculationExpectingDefined(prediction());
}
- bool shouldSpeculateInt32ForArithmetic()
- {
- return isInt32SpeculationForArithmetic(prediction());
- }
-
- bool shouldSpeculateInt32OrBooleanForArithmetic()
+ bool shouldSpeculateMachineInt()
{
- return isInt32OrBooleanSpeculationForArithmetic(prediction());
+ return isMachineIntSpeculation(prediction());
}
- bool shouldSpeculateInt32OrBooleanExpectingDefined()
+ bool shouldSpeculateMachineIntForArithmetic()
{
- return isInt32OrBooleanSpeculationExpectingDefined(prediction());
+ return isMachineIntSpeculationForArithmetic(prediction());
}
- bool shouldSpeculateMachineInt()
+ bool shouldSpeculateMachineIntExpectingDefined()
{
- return isMachineIntSpeculation(prediction());
+ return isMachineIntSpeculationExpectingDefined(prediction());
}
bool shouldSpeculateDouble()
@@ -1786,9 +1313,9 @@ struct Node {
return isDoubleSpeculation(prediction());
}
- bool shouldSpeculateDoubleReal()
+ bool shouldSpeculateDoubleForArithmetic()
{
- return isDoubleRealSpeculation(prediction());
+ return isDoubleSpeculationForArithmetic(prediction());
}
bool shouldSpeculateNumber()
@@ -1796,40 +1323,20 @@ struct Node {
return isFullNumberSpeculation(prediction());
}
- bool shouldSpeculateNumberOrBoolean()
- {
- return isFullNumberOrBooleanSpeculation(prediction());
- }
-
- bool shouldSpeculateNumberOrBooleanExpectingDefined()
+ bool shouldSpeculateNumberExpectingDefined()
{
- return isFullNumberOrBooleanSpeculationExpectingDefined(prediction());
+ return isFullNumberSpeculationExpectingDefined(prediction());
}
bool shouldSpeculateBoolean()
{
return isBooleanSpeculation(prediction());
}
-
- bool shouldSpeculateOther()
- {
- return isOtherSpeculation(prediction());
- }
-
- bool shouldSpeculateMisc()
- {
- return isMiscSpeculation(prediction());
- }
bool shouldSpeculateStringIdent()
{
return isStringIdentSpeculation(prediction());
}
-
- bool shouldSpeculateNotStringVar()
- {
- return isNotStringVarSpeculation(prediction());
- }
bool shouldSpeculateString()
{
@@ -1861,14 +1368,9 @@ struct Node {
return isArraySpeculation(prediction());
}
- bool shouldSpeculateDirectArguments()
+ bool shouldSpeculateArguments()
{
- return isDirectArgumentsSpeculation(prediction());
- }
-
- bool shouldSpeculateScopedArguments()
- {
- return isScopedArgumentsSpeculation(prediction());
+ return isArgumentsSpeculation(prediction());
}
bool shouldSpeculateInt8Array()
@@ -1936,11 +1438,6 @@ struct Node {
return isCellSpeculation(prediction());
}
- bool shouldSpeculateNotCell()
- {
- return isNotCellSpeculation(prediction());
- }
-
static bool shouldSpeculateBoolean(Node* op1, Node* op2)
{
return op1->shouldSpeculateBoolean() && op2->shouldSpeculateBoolean();
@@ -1951,22 +1448,14 @@ struct Node {
return op1->shouldSpeculateInt32() && op2->shouldSpeculateInt32();
}
- static bool shouldSpeculateInt32OrBoolean(Node* op1, Node* op2)
+ static bool shouldSpeculateInt32ForArithmetic(Node* op1, Node* op2)
{
- return op1->shouldSpeculateInt32OrBoolean()
- && op2->shouldSpeculateInt32OrBoolean();
+ return op1->shouldSpeculateInt32ForArithmetic() && op2->shouldSpeculateInt32ForArithmetic();
}
- static bool shouldSpeculateInt32OrBooleanForArithmetic(Node* op1, Node* op2)
+ static bool shouldSpeculateInt32ExpectingDefined(Node* op1, Node* op2)
{
- return op1->shouldSpeculateInt32OrBooleanForArithmetic()
- && op2->shouldSpeculateInt32OrBooleanForArithmetic();
- }
-
- static bool shouldSpeculateInt32OrBooleanExpectingDefined(Node* op1, Node* op2)
- {
- return op1->shouldSpeculateInt32OrBooleanExpectingDefined()
- && op2->shouldSpeculateInt32OrBooleanExpectingDefined();
+ return op1->shouldSpeculateInt32ExpectingDefined() && op2->shouldSpeculateInt32ExpectingDefined();
}
static bool shouldSpeculateMachineInt(Node* op1, Node* op2)
@@ -1974,100 +1463,49 @@ struct Node {
return op1->shouldSpeculateMachineInt() && op2->shouldSpeculateMachineInt();
}
- static bool shouldSpeculateNumber(Node* op1, Node* op2)
+ static bool shouldSpeculateMachineIntForArithmetic(Node* op1, Node* op2)
{
- return op1->shouldSpeculateNumber() && op2->shouldSpeculateNumber();
+ return op1->shouldSpeculateMachineIntForArithmetic() && op2->shouldSpeculateMachineIntForArithmetic();
}
- static bool shouldSpeculateNumberOrBoolean(Node* op1, Node* op2)
+ static bool shouldSpeculateMachineIntExpectingDefined(Node* op1, Node* op2)
{
- return op1->shouldSpeculateNumberOrBoolean()
- && op2->shouldSpeculateNumberOrBoolean();
+ return op1->shouldSpeculateMachineIntExpectingDefined() && op2->shouldSpeculateMachineIntExpectingDefined();
}
- static bool shouldSpeculateNumberOrBooleanExpectingDefined(Node* op1, Node* op2)
+ static bool shouldSpeculateDoubleForArithmetic(Node* op1, Node* op2)
{
- return op1->shouldSpeculateNumberOrBooleanExpectingDefined()
- && op2->shouldSpeculateNumberOrBooleanExpectingDefined();
+ return op1->shouldSpeculateDoubleForArithmetic() && op2->shouldSpeculateDoubleForArithmetic();
}
- static bool shouldSpeculateFinalObject(Node* op1, Node* op2)
- {
- return op1->shouldSpeculateFinalObject() && op2->shouldSpeculateFinalObject();
- }
-
- static bool shouldSpeculateArray(Node* op1, Node* op2)
- {
- return op1->shouldSpeculateArray() && op2->shouldSpeculateArray();
- }
-
- bool canSpeculateInt32(RareCaseProfilingSource source)
- {
- return nodeCanSpeculateInt32(arithNodeFlags(), source);
- }
-
- bool canSpeculateInt52(RareCaseProfilingSource source)
- {
- return nodeCanSpeculateInt52(arithNodeFlags(), source);
- }
-
- RareCaseProfilingSource sourceFor(PredictionPass pass)
+ static bool shouldSpeculateNumber(Node* op1, Node* op2)
{
- if (pass == PrimaryPass || child1()->sawBooleans() || (child2() && child2()->sawBooleans()))
- return DFGRareCase;
- return AllRareCases;
+ return op1->shouldSpeculateNumber() && op2->shouldSpeculateNumber();
}
- bool canSpeculateInt32(PredictionPass pass)
+ static bool shouldSpeculateNumberExpectingDefined(Node* op1, Node* op2)
{
- return canSpeculateInt32(sourceFor(pass));
+ return op1->shouldSpeculateNumberExpectingDefined() && op2->shouldSpeculateNumberExpectingDefined();
}
- bool canSpeculateInt52(PredictionPass pass)
- {
- return canSpeculateInt52(sourceFor(pass));
- }
-
- bool hasTypeLocation()
- {
- return op() == ProfileType;
- }
-
- TypeLocation* typeLocation()
- {
- ASSERT(hasTypeLocation());
- return reinterpret_cast<TypeLocation*>(m_opInfo);
- }
-
- bool hasBasicBlockLocation()
+ static bool shouldSpeculateFinalObject(Node* op1, Node* op2)
{
- return op() == ProfileControlFlow;
+ return op1->shouldSpeculateFinalObject() && op2->shouldSpeculateFinalObject();
}
- BasicBlockLocation* basicBlockLocation()
- {
- ASSERT(hasBasicBlockLocation());
- return reinterpret_cast<BasicBlockLocation*>(m_opInfo);
- }
-
- Node* replacement() const
- {
- return m_misc.replacement;
- }
-
- void setReplacement(Node* replacement)
+ static bool shouldSpeculateArray(Node* op1, Node* op2)
{
- m_misc.replacement = replacement;
+ return op1->shouldSpeculateArray() && op2->shouldSpeculateArray();
}
- Epoch epoch() const
+ bool canSpeculateInt32()
{
- return Epoch::fromUnsigned(m_misc.epoch);
+ return nodeCanSpeculateInt32(arithNodeFlags());
}
- void setEpoch(Epoch epoch)
+ bool canSpeculateInt52()
{
- m_misc.epoch = epoch.toUnsigned();
+ return nodeCanSpeculateInt52(arithNodeFlags());
}
void dumpChildren(PrintStream& out)
@@ -2084,9 +1522,12 @@ struct Node {
}
// NB. This class must have a trivial destructor.
-
- NodeOrigin origin;
-
+
+ // Used for determining what bytecode this came from. This is important for
+ // debugging, exceptions, and even basic execution semantics.
+ CodeOrigin codeOrigin;
+ // Code origin for where the node exits to.
+ CodeOrigin codeOriginForExitTarget;
// References to up to 3 children, or links to a variable length set of children.
AdjacencyList children;
@@ -2109,22 +1550,20 @@ public:
AbstractValue value;
// Miscellaneous data that is usually meaningless, but can hold some analysis results
- // if you ask right. For example, if you do Graph::initializeNodeOwners(), Node::owner
+ // if you ask right. For example, if you do Graph::initializeNodeOwners(), misc.owner
// will tell you which basic block a node belongs to. You cannot rely on this persisting
// across transformations unless you do the maintenance work yourself. Other phases use
- // Node::replacement, but they do so manually: first you do Graph::clearReplacements()
- // and then you set, and use, replacement's yourself. Same thing for epoch.
+ // misc.replacement, but they do so manually: first you do Graph::clearReplacements()
+ // and then you set, and use, replacement's yourself.
//
// Bottom line: don't use these fields unless you initialize them yourself, or by
// calling some appropriate methods that initialize them the way you want. Otherwise,
// these fields are meaningless.
-private:
union {
Node* replacement;
- unsigned epoch;
- } m_misc;
-public:
- BasicBlock* owner;
+ BasicBlock* owner;
+ bool needsBarrier;
+ } misc;
};
inline bool nodeComparator(Node* a, Node* b)
diff --git a/Source/JavaScriptCore/dfg/DFGNodeAllocator.h b/Source/JavaScriptCore/dfg/DFGNodeAllocator.h
index e9024cc19..afd72e584 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeAllocator.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeAllocator.h
@@ -26,6 +26,8 @@
#ifndef DFGNodeAllocator_h
#define DFGNodeAllocator_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAllocator.h"
diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
index 366fbeceb..396ca6119 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
+++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,7 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
#include <wtf/StringPrintStream.h>
@@ -47,15 +46,9 @@ void dumpNodeFlags(PrintStream& actualOut, NodeFlags flags)
case NodeResultNumber:
out.print(comma, "Number");
break;
- case NodeResultDouble:
- out.print(comma, "Double");
- break;
case NodeResultInt32:
out.print(comma, "Int32");
break;
- case NodeResultInt52:
- out.print(comma, "Int52");
- break;
case NodeResultBoolean:
out.print(comma, "Boolean");
break;
@@ -74,6 +67,12 @@ void dumpNodeFlags(PrintStream& actualOut, NodeFlags flags)
if (flags & NodeHasVarArgs)
out.print(comma, "VarArgs");
+ if (flags & NodeClobbersWorld)
+ out.print(comma, "Clobbers");
+
+ if (flags & NodeMightClobber)
+ out.print(comma, "MightClobber");
+
if (flags & NodeResultMask) {
if (!(flags & NodeBytecodeUsesAsNumber) && !(flags & NodeBytecodeNeedsNegZero))
out.print(comma, "PureInt");
@@ -85,26 +84,17 @@ void dumpNodeFlags(PrintStream& actualOut, NodeFlags flags)
out.print(comma, "UseAsOther");
}
- if (flags & NodeMayOverflowInBaseline)
- out.print(comma, "MayOverflowInBaseline");
+ if (flags & NodeMayOverflow)
+ out.print(comma, "MayOverflow");
- if (flags & NodeMayOverflowInDFG)
- out.print(comma, "MayOverflowInDFG");
-
- if (flags & NodeMayNegZeroInBaseline)
- out.print(comma, "MayNegZeroInBaseline");
-
- if (flags & NodeMayNegZeroInDFG)
- out.print(comma, "MayNegZeroInDFG");
+ if (flags & NodeMayNegZero)
+ out.print(comma, "MayNegZero");
if (flags & NodeBytecodeUsesAsInt)
out.print(comma, "UseAsInt");
-
- if (flags & NodeBytecodeUsesAsArrayIndex)
- out.print(comma, "ReallyWantsInt");
- if (flags & NodeIsFlushed)
- out.print(comma, "IsFlushed");
+ if (!(flags & NodeDoesNotExit))
+ out.print(comma, "CanExit");
CString string = out.toCString();
if (!string.length())
diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.h b/Source/JavaScriptCore/dfg/DFGNodeFlags.h
index 4db2a43e7..d68f0587b 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeFlags.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGNodeFlags_h
#define DFGNodeFlags_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include <wtf/PrintStream.h>
@@ -38,38 +40,35 @@ namespace JSC { namespace DFG {
#define NodeResultMask 0x0007
#define NodeResultJS 0x0001
#define NodeResultNumber 0x0002
-#define NodeResultDouble 0x0003
-#define NodeResultInt32 0x0004
-#define NodeResultInt52 0x0005
-#define NodeResultBoolean 0x0006
-#define NodeResultStorage 0x0007
+#define NodeResultInt32 0x0003
+#define NodeResultInt52 0x0004
+#define NodeResultBoolean 0x0005
+#define NodeResultStorage 0x0006
#define NodeMustGenerate 0x0008 // set on nodes that have side effects, and may not trivially be removed by DCE.
#define NodeHasVarArgs 0x0010
-// 0x0020 and 0x0040 are free.
+#define NodeClobbersWorld 0x0020
+#define NodeMightClobber 0x0040
-#define NodeBehaviorMask 0x0780
-#define NodeMayOverflowInBaseline 0x0080
-#define NodeMayOverflowInDFG 0x0100
-#define NodeMayNegZeroInBaseline 0x0200
-#define NodeMayNegZeroInDFG 0x0400
+#define NodeBehaviorMask 0x0180
+#define NodeMayOverflow 0x0080
+#define NodeMayNegZero 0x0100
-#define NodeBytecodeBackPropMask 0xf800
+#define NodeBytecodeBackPropMask 0x1E00
#define NodeBytecodeUseBottom 0x0000
-#define NodeBytecodeUsesAsNumber 0x0800 // The result of this computation may be used in a context that observes fractional, or bigger-than-int32, results.
-#define NodeBytecodeNeedsNegZero 0x1000 // The result of this computation may be used in a context that observes -0.
-#define NodeBytecodeUsesAsOther 0x2000 // The result of this computation may be used in a context that distinguishes between NaN and other things (like undefined).
+#define NodeBytecodeUsesAsNumber 0x0200 // The result of this computation may be used in a context that observes fractional, or bigger-than-int32, results.
+#define NodeBytecodeNeedsNegZero 0x0400 // The result of this computation may be used in a context that observes -0.
+#define NodeBytecodeUsesAsOther 0x0800 // The result of this computation may be used in a context that distinguishes between NaN and other things (like undefined).
#define NodeBytecodeUsesAsValue (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther)
-#define NodeBytecodeUsesAsInt 0x4000 // The result of this computation is known to be used in a context that prefers, but does not require, integer values.
-#define NodeBytecodeUsesAsArrayIndex 0x8000 // The result of this computation is known to be used in a context that strongly prefers integer values, to the point that we should avoid using doubles if at all possible.
+#define NodeBytecodeUsesAsInt 0x1000 // The result of this computation is known to be used in a context that prefers, but does not require, integer values.
#define NodeArithFlagsMask (NodeBehaviorMask | NodeBytecodeBackPropMask)
-#define NodeIsFlushed 0x10000 // Computed by CPSRethreadingPhase, will tell you which local nodes are backwards-reachable from a Flush.
+#define NodeDoesNotExit 0x2000 // This flag is negated to make it natural for the default to be that a node does exit.
+
+#define NodeRelevantToOSR 0x4000
-#define NodeMiscFlag1 0x20000
-#define NodeMiscFlag2 0x40000
-#define NodeMiscFlag3 0x80000
+#define NodeIsStaticConstant 0x8000 // Used only by the parser, to determine if a constant arose statically and hence could be folded at parse-time.
typedef uint32_t NodeFlags;
@@ -88,79 +87,35 @@ static inline bool bytecodeCanIgnoreNegativeZero(NodeFlags flags)
return !(flags & NodeBytecodeNeedsNegZero);
}
-enum RareCaseProfilingSource {
- BaselineRareCase, // Comes from slow case counting in the baseline JIT.
- DFGRareCase, // Comes from OSR exit profiles.
- AllRareCases
-};
-
-static inline bool nodeMayOverflow(NodeFlags flags, RareCaseProfilingSource source)
+static inline bool nodeMayOverflow(NodeFlags flags)
{
- NodeFlags mask = 0;
- switch (source) {
- case BaselineRareCase:
- mask = NodeMayOverflowInBaseline;
- break;
- case DFGRareCase:
- mask = NodeMayOverflowInDFG;
- break;
- case AllRareCases:
- mask = NodeMayOverflowInBaseline | NodeMayOverflowInDFG;
- break;
- }
- return !!(flags & mask);
+ return !!(flags & NodeMayOverflow);
}
-static inline bool nodeMayNegZero(NodeFlags flags, RareCaseProfilingSource source)
+static inline bool nodeMayNegZero(NodeFlags flags)
{
- NodeFlags mask = 0;
- switch (source) {
- case BaselineRareCase:
- mask = NodeMayNegZeroInBaseline;
- break;
- case DFGRareCase:
- mask = NodeMayNegZeroInDFG;
- break;
- case AllRareCases:
- mask = NodeMayNegZeroInBaseline | NodeMayNegZeroInDFG;
- break;
- }
- return !!(flags & mask);
+ return !!(flags & NodeMayNegZero);
}
-static inline bool nodeCanSpeculateInt32(NodeFlags flags, RareCaseProfilingSource source)
+static inline bool nodeCanSpeculateInt32(NodeFlags flags)
{
- if (nodeMayOverflow(flags, source))
+ if (nodeMayOverflow(flags))
return !bytecodeUsesAsNumber(flags);
- if (nodeMayNegZero(flags, source))
+ if (nodeMayNegZero(flags))
return bytecodeCanIgnoreNegativeZero(flags);
return true;
}
-static inline bool nodeCanSpeculateInt52(NodeFlags flags, RareCaseProfilingSource source)
+static inline bool nodeCanSpeculateInt52(NodeFlags flags)
{
- if (nodeMayNegZero(flags, source))
+ if (nodeMayNegZero(flags))
return bytecodeCanIgnoreNegativeZero(flags);
return true;
}
-// FIXME: Get rid of this.
-// https://bugs.webkit.org/show_bug.cgi?id=131689
-static inline NodeFlags canonicalResultRepresentation(NodeFlags flags)
-{
- switch (flags) {
- case NodeResultDouble:
- case NodeResultInt52:
- case NodeResultStorage:
- return flags;
- default:
- return NodeResultJS;
- }
-}
-
void dumpNodeFlags(PrintStream&, NodeFlags);
MAKE_PRINT_ADAPTOR(NodeFlagsDump, NodeFlags, dumpNodeFlags);
diff --git a/Source/JavaScriptCore/dfg/DFGNodeOrigin.h b/Source/JavaScriptCore/dfg/DFGNodeOrigin.h
deleted file mode 100644
index 12cc064dc..000000000
--- a/Source/JavaScriptCore/dfg/DFGNodeOrigin.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGNodeOrigin_h
-#define DFGNodeOrigin_h
-
-#if ENABLE(DFG_JIT)
-
-#include "CodeOrigin.h"
-
-namespace JSC { namespace DFG {
-
-struct NodeOrigin {
- NodeOrigin() { }
-
- explicit NodeOrigin(CodeOrigin codeOrigin)
- : semantic(codeOrigin)
- , forExit(codeOrigin)
- {
- }
-
- NodeOrigin(CodeOrigin semantic, CodeOrigin forExit)
- : semantic(semantic)
- , forExit(forExit)
- {
- }
-
- bool isSet() const
- {
- ASSERT(semantic.isSet() == forExit.isSet());
- return semantic.isSet();
- }
-
- // Used for determining what bytecode this came from. This is important for
- // debugging, exceptions, and even basic execution semantics.
- CodeOrigin semantic;
- // Code origin for where the node exits to.
- CodeOrigin forExit;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGNodeOrigin_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h
index e9bf72704..3d2f80bcf 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeType.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeType.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGNodeType_h
#define DFGNodeType_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGNodeFlags.h"
@@ -35,11 +37,11 @@ namespace JSC { namespace DFG {
// This macro defines a set of information about all known node types, used to populate NodeId, NodeType below.
#define FOR_EACH_DFG_OP(macro) \
/* A constant in the CodeBlock's constant pool. */\
- macro(JSConstant, NodeResultJS) \
+ macro(JSConstant, NodeResultJS | NodeDoesNotExit) \
\
- /* Constants with specific representations. */\
- macro(DoubleConstant, NodeResultDouble) \
- macro(Int52Constant, NodeResultInt52) \
+ /* A constant not in the CodeBlock's constant pool. Uses get patched to jumps that exit the */\
+ /* code block. */\
+ macro(WeakJSConstant, NodeResultJS | NodeDoesNotExit) \
\
/* Marker to indicate that an operation was optimized entirely and all that is left */\
/* is to make one node alias another. CSE will later usually eliminate this node, */\
@@ -50,33 +52,21 @@ namespace JSC { namespace DFG {
macro(ToThis, NodeResultJS) \
macro(CreateThis, NodeResultJS) /* Note this is not MustGenerate since we're returning it anyway. */ \
macro(GetCallee, NodeResultJS) \
- macro(GetArgumentCount, NodeResultInt32) \
\
/* Nodes for local variable access. These nodes are linked together using Phi nodes. */\
/* Any two nodes that are part of the same Phi graph will share the same */\
- /* VariableAccessData, and thus will share predictions. FIXME: We should come up with */\
- /* better names for a lot of these. https://bugs.webkit.org/show_bug.cgi?id=137307. */\
- /* Note that GetLocal is MustGenerate because it's our only way of knowing that some other */\
- /* basic block might have read a local variable in bytecode. We only remove GetLocals if it */\
- /* is redundant because of an earlier GetLocal or SetLocal in the same block. We could make */\
- /* these not MustGenerate and use a more sophisticated analysis to insert PhantomLocals in */\
- /* the same way that we insert Phantoms. That's hard and probably not profitable. See */\
- /* https://bugs.webkit.org/show_bug.cgi?id=144086 */\
- macro(GetLocal, NodeResultJS | NodeMustGenerate) \
+ /* VariableAccessData, and thus will share predictions. */\
+ macro(GetLocal, NodeResultJS) \
macro(SetLocal, 0) \
- \
- macro(PutStack, NodeMustGenerate) \
- macro(KillStack, NodeMustGenerate) \
- macro(GetStack, NodeResultJS) \
- \
- macro(MovHint, NodeMustGenerate) \
- macro(ZombieHint, NodeMustGenerate) \
+ macro(MovHint, NodeDoesNotExit) \
+ macro(ZombieHint, NodeDoesNotExit) \
+ macro(GetArgument, NodeResultJS | NodeMustGenerate) \
macro(Phantom, NodeMustGenerate) \
- macro(Check, NodeMustGenerate) /* Used if we want just a type check but not liveness. Non-checking uses will be removed. */\
- macro(Upsilon, 0) \
- macro(Phi, 0) \
- macro(Flush, NodeMustGenerate) \
- macro(PhantomLocal, NodeMustGenerate) \
+ macro(Check, 0) /* Used if we want just a type check but not liveness. DCE eithers kills this or converts it to Phantom. */\
+ macro(Upsilon, NodeDoesNotExit | NodeRelevantToOSR) \
+ macro(Phi, NodeDoesNotExit | NodeRelevantToOSR) \
+ macro(Flush, NodeMustGenerate | NodeDoesNotExit) \
+ macro(PhantomLocal, NodeMustGenerate | NodeDoesNotExit) \
\
/* Hint that this is where bytecode thinks is a good place to OSR. Note that this */\
/* will exist even in inlined loops. This has no execution semantics but it must */\
@@ -91,7 +81,6 @@ namespace JSC { namespace DFG {
/* Tier-up checks from the DFG to the FTL. */\
macro(CheckTierUpInLoop, NodeMustGenerate) \
macro(CheckTierUpAndOSREnter, NodeMustGenerate) \
- macro(CheckTierUpWithNestedTriggerAndOSREnter, NodeMustGenerate) \
macro(CheckTierUpAtReturn, NodeMustGenerate) \
\
/* Get the value of a local variable, without linking into the VariableAccessData */\
@@ -100,7 +89,7 @@ namespace JSC { namespace DFG {
macro(GetLocalUnlinked, NodeResultJS) \
\
/* Marker for an argument being set at the prologue of a function. */\
- macro(SetArgument, 0) \
+ macro(SetArgument, NodeDoesNotExit) \
\
/* Marker of location in the IR where we may possibly perform jump replacement to */\
/* invalidate this code block. */\
@@ -117,103 +106,97 @@ namespace JSC { namespace DFG {
macro(ValueToInt32, NodeResultInt32) \
/* Used to box the result of URShift nodes (result has range 0..2^32-1). */\
macro(UInt32ToNumber, NodeResultNumber) \
- /* Converts booleans to numbers but passes everything else through. */\
- macro(BooleanToNumber, NodeResultJS) \
\
- /* Attempt to truncate a double to int32; this will exit if it can't do it. */\
+ /* Used to cast known integers to doubles, so as to separate the double form */\
+ /* of the value from the integer form. */\
+ macro(Int32ToDouble, NodeResultNumber) \
+ /* Used to speculate that a double value is actually an integer. */\
macro(DoubleAsInt32, NodeResultInt32) \
- \
- /* Change the representation of a value. */\
- macro(DoubleRep, NodeResultDouble) \
- macro(Int52Rep, NodeResultInt52) \
- macro(ValueRep, NodeResultJS) \
- \
- /* Bogus type asserting node. Useful for testing, disappears during Fixup. */\
- macro(FiatInt52, NodeResultJS) \
- \
- /* Nodes for arithmetic operations. Note that if they do checks other than just type checks, */\
- /* then they are MustGenerate. This is probably stricter than it needs to be - for example */\
- /* they won't do checks if they are speculated double. Also, we could kill these if we do it */\
- /* before AI starts eliminating downstream operations based on proofs, for example in the */\
- /* case of "var tmp = a + b; return (tmp | 0) == tmp;". If a, b are speculated integer then */\
- /* this is only true if we do the overflow check - hence the need to keep it alive. More */\
- /* generally, we need to keep alive any operation whose checks cause filtration in AI. */\
- macro(ArithAdd, NodeResultNumber | NodeMustGenerate) \
- macro(ArithClz32, NodeResultInt32) \
- macro(ArithSub, NodeResultNumber | NodeMustGenerate) \
- macro(ArithNegate, NodeResultNumber | NodeMustGenerate) \
- macro(ArithMul, NodeResultNumber | NodeMustGenerate) \
+ /* Used to separate representation and register allocation of Int52's represented */\
+ /* as values. */\
+ macro(Int52ToValue, NodeResultJS) \
+ macro(Int52ToDouble, NodeResultNumber) \
+ \
+ /* Nodes for arithmetic operations. */\
+ macro(ArithAdd, NodeResultNumber) \
+ macro(ArithSub, NodeResultNumber) \
+ macro(ArithNegate, NodeResultNumber) \
+ macro(ArithMul, NodeResultNumber) \
macro(ArithIMul, NodeResultInt32) \
- macro(ArithDiv, NodeResultNumber | NodeMustGenerate) \
- macro(ArithMod, NodeResultNumber | NodeMustGenerate) \
- macro(ArithAbs, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithDiv, NodeResultNumber) \
+ macro(ArithMod, NodeResultNumber) \
+ macro(ArithAbs, NodeResultNumber) \
macro(ArithMin, NodeResultNumber) \
macro(ArithMax, NodeResultNumber) \
- macro(ArithFRound, NodeResultNumber) \
- macro(ArithPow, NodeResultNumber) \
- macro(ArithRound, NodeResultNumber) \
macro(ArithSqrt, NodeResultNumber) \
macro(ArithSin, NodeResultNumber) \
macro(ArithCos, NodeResultNumber) \
- macro(ArithLog, NodeResultNumber) \
\
/* Add of values may either be arithmetic, or result in string concatenation. */\
- macro(ValueAdd, NodeResultJS | NodeMustGenerate) \
+ macro(ValueAdd, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
\
/* Property access. */\
/* PutByValAlias indicates a 'put' aliases a prior write to the same property. */\
/* Since a put to 'length' may invalidate optimizations here, */\
/* this must be the directly subsequent property put. Note that PutByVal */\
/* opcodes use VarArgs beause they may have up to 4 children. */\
- macro(GetByVal, NodeResultJS | NodeMustGenerate) \
- macro(GetMyArgumentByVal, NodeResultJS | NodeMustGenerate) \
- macro(LoadVarargs, NodeMustGenerate) \
- macro(ForwardVarargs, NodeMustGenerate) \
- macro(PutByValDirect, NodeMustGenerate | NodeHasVarArgs) \
- macro(PutByVal, NodeMustGenerate | NodeHasVarArgs) \
- macro(PutByValAlias, NodeMustGenerate | NodeHasVarArgs) \
- macro(GetById, NodeResultJS | NodeMustGenerate) \
- macro(GetByIdFlush, NodeResultJS | NodeMustGenerate) \
- macro(PutById, NodeMustGenerate) \
- macro(PutByIdFlush, NodeMustGenerate | NodeMustGenerate) \
- macro(PutByIdDirect, NodeMustGenerate) \
+ macro(GetByVal, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
+ macro(PutByValDirect, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \
+ macro(PutByVal, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \
+ macro(PutByValAlias, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \
+ macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetByIdFlush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(PutById, NodeMustGenerate | NodeClobbersWorld) \
+ macro(PutByIdDirect, NodeMustGenerate | NodeClobbersWorld) \
macro(CheckStructure, NodeMustGenerate) \
- macro(GetExecutable, NodeResultJS) \
+ macro(CheckExecutable, NodeMustGenerate) \
+ /* Transition watchpoints are a contract between the party setting the watchpoint */\
+ /* and the runtime system, where the party promises that the child object once had */\
+ /* the structure being watched, and the runtime system in turn promises that the */\
+ /* watchpoint will be turned into an OSR exit if any object with that structure */\
+ /* ever transitions to a different structure. Hence, the child object must have */\
+ /* previously had a CheckStructure executed on it or we're dealing with an object */\
+ /* constant (WeakJSConstant) and the object was known to have that structure at */\
+ /* compile-time. In the latter case this means that no structure checks have to be */\
+ /* performed for this object by JITted code. In the former case this means that*/\
+ /* the object's structure does not need to be rechecked due to side-effecting */\
+ /* (clobbering) operations. */\
+ macro(StructureTransitionWatchpoint, NodeMustGenerate) \
macro(PutStructure, NodeMustGenerate) \
- macro(AllocatePropertyStorage, NodeMustGenerate | NodeResultStorage) \
- macro(ReallocatePropertyStorage, NodeMustGenerate | NodeResultStorage) \
+ macro(PhantomPutStructure, NodeMustGenerate | NodeDoesNotExit) \
+ macro(AllocatePropertyStorage, NodeMustGenerate | NodeDoesNotExit | NodeResultStorage) \
+ macro(ReallocatePropertyStorage, NodeMustGenerate | NodeDoesNotExit | NodeResultStorage) \
macro(GetButterfly, NodeResultStorage) \
macro(CheckArray, NodeMustGenerate) \
macro(Arrayify, NodeMustGenerate) \
macro(ArrayifyToStructure, NodeMustGenerate) \
macro(GetIndexedPropertyStorage, NodeResultStorage) \
macro(ConstantStoragePointer, NodeResultStorage) \
- macro(GetGetter, NodeResultJS) \
- macro(GetSetter, NodeResultJS) \
+ macro(TypedArrayWatchpoint, NodeMustGenerate) \
macro(GetByOffset, NodeResultJS) \
- macro(GetGetterSetterByOffset, NodeResultJS) \
- macro(MultiGetByOffset, NodeResultJS | NodeMustGenerate) \
macro(PutByOffset, NodeMustGenerate) \
- macro(MultiPutByOffset, NodeMustGenerate) \
macro(GetArrayLength, NodeResultInt32) \
macro(GetTypedArrayByteOffset, NodeResultInt32) \
macro(GetScope, NodeResultJS) \
+ macro(GetMyScope, NodeResultJS) \
+ macro(SkipTopScope, NodeResultJS) \
macro(SkipScope, NodeResultJS) \
+ macro(GetClosureRegisters, NodeResultStorage) \
macro(GetClosureVar, NodeResultJS) \
macro(PutClosureVar, NodeMustGenerate) \
macro(GetGlobalVar, NodeResultJS) \
macro(PutGlobalVar, NodeMustGenerate) \
macro(NotifyWrite, NodeMustGenerate) \
+ macro(VariableWatchpoint, NodeMustGenerate) \
macro(VarInjectionWatchpoint, NodeMustGenerate) \
- macro(CheckCell, NodeMustGenerate) \
- macro(CheckNotEmpty, NodeMustGenerate) \
- macro(CheckBadCell, NodeMustGenerate) \
+ macro(FunctionReentryWatchpoint, NodeMustGenerate) \
+ macro(CheckFunction, NodeMustGenerate) \
+ macro(AllocationProfileWatchpoint, NodeMustGenerate) \
macro(CheckInBounds, NodeMustGenerate) \
- macro(CheckIdent, NodeMustGenerate) \
\
/* Optimizations for array mutation. */\
- macro(ArrayPush, NodeResultJS | NodeMustGenerate) \
- macro(ArrayPop, NodeResultJS | NodeMustGenerate) \
+ macro(ArrayPush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ArrayPop, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
\
/* Optimizations for regular expression matching. */\
macro(RegExpExec, NodeResultJS | NodeMustGenerate) \
@@ -225,39 +208,27 @@ namespace JSC { namespace DFG {
macro(StringFromCharCode, NodeResultJS) \
\
/* Nodes for comparison operations. */\
- macro(CompareLess, NodeResultBoolean | NodeMustGenerate) \
- macro(CompareLessEq, NodeResultBoolean | NodeMustGenerate) \
- macro(CompareGreater, NodeResultBoolean | NodeMustGenerate) \
- macro(CompareGreaterEq, NodeResultBoolean | NodeMustGenerate) \
- macro(CompareEq, NodeResultBoolean | NodeMustGenerate) \
+ macro(CompareLess, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \
+ macro(CompareLessEq, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \
+ macro(CompareGreater, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \
+ macro(CompareGreaterEq, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \
+ macro(CompareEq, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \
macro(CompareEqConstant, NodeResultBoolean) \
macro(CompareStrictEq, NodeResultBoolean) \
+ macro(CompareStrictEqConstant, NodeResultBoolean) \
\
/* Calls. */\
- macro(Call, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \
- macro(Construct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \
- macro(CallVarargs, NodeResultJS | NodeMustGenerate) \
- macro(CallForwardVarargs, NodeResultJS | NodeMustGenerate) \
- macro(ConstructVarargs, NodeResultJS | NodeMustGenerate) \
- macro(ConstructForwardVarargs, NodeResultJS | NodeMustGenerate) \
+ macro(Call, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
+ macro(Construct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
\
/* Allocations. */\
macro(NewObject, NodeResultJS) \
macro(NewArray, NodeResultJS | NodeHasVarArgs) \
- macro(NewArrayWithSize, NodeResultJS | NodeMustGenerate) \
+ macro(NewArrayWithSize, NodeResultJS) \
macro(NewArrayBuffer, NodeResultJS) \
- macro(NewTypedArray, NodeResultJS | NodeMustGenerate) \
+ macro(NewTypedArray, NodeResultJS | NodeClobbersWorld | NodeMustGenerate) \
macro(NewRegexp, NodeResultJS) \
\
- /* Support for allocation sinking. */\
- macro(PhantomNewObject, NodeResultJS | NodeMustGenerate) \
- macro(PutHint, NodeMustGenerate) \
- macro(CheckStructureImmediate, NodeMustGenerate) \
- macro(MaterializeNewObject, NodeResultJS | NodeHasVarArgs) \
- macro(PhantomNewFunction, NodeResultJS | NodeMustGenerate) \
- macro(PhantomCreateActivation, NodeResultJS | NodeMustGenerate) \
- macro(MaterializeCreateActivation, NodeResultJS | NodeHasVarArgs) \
- \
/* Nodes for misc operations. */\
macro(Breakpoint, NodeMustGenerate) \
macro(ProfileWillCall, NodeMustGenerate) \
@@ -269,30 +240,36 @@ namespace JSC { namespace DFG {
macro(IsNumber, NodeResultBoolean) \
macro(IsString, NodeResultBoolean) \
macro(IsObject, NodeResultBoolean) \
- macro(IsObjectOrNull, NodeResultBoolean) \
macro(IsFunction, NodeResultBoolean) \
macro(TypeOf, NodeResultJS) \
macro(LogicalNot, NodeResultBoolean) \
- macro(ToPrimitive, NodeResultJS | NodeMustGenerate) \
- macro(ToString, NodeResultJS | NodeMustGenerate) \
- macro(CallStringConstructor, NodeResultJS | NodeMustGenerate) \
+ macro(ToPrimitive, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ToString, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
macro(NewStringObject, NodeResultJS) \
macro(MakeRope, NodeResultJS) \
- macro(In, NodeResultBoolean | NodeMustGenerate) \
- macro(ProfileType, NodeMustGenerate) \
- macro(ProfileControlFlow, NodeMustGenerate) \
+ macro(In, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \
\
+ /* Nodes used for activations. Activation support works by having it anchored at */\
+    /* epilogues via TearOffActivation, and all CreateActivation nodes kept alive by */\
+ /* being threaded with each other. */\
macro(CreateActivation, NodeResultJS) \
+ macro(TearOffActivation, NodeMustGenerate) \
+ \
+ /* Nodes used for arguments. Similar to activation support, only it makes even less */\
+ /* sense. */\
+ macro(CreateArguments, NodeResultJS) \
+ macro(PhantomArguments, NodeResultJS | NodeDoesNotExit) \
+ macro(TearOffArguments, NodeMustGenerate) \
+ macro(GetMyArgumentsLength, NodeResultJS | NodeMustGenerate) \
+ macro(GetMyArgumentByVal, NodeResultJS | NodeMustGenerate) \
+ macro(GetMyArgumentsLengthSafe, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetMyArgumentByValSafe, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckArgumentsNotCreated, NodeMustGenerate) \
\
- macro(CreateDirectArguments, NodeResultJS) \
- macro(PhantomDirectArguments, NodeResultJS | NodeMustGenerate) \
- macro(CreateScopedArguments, NodeResultJS) \
- macro(CreateClonedArguments, NodeResultJS) \
- macro(PhantomClonedArguments, NodeResultJS | NodeMustGenerate) \
- macro(GetFromArguments, NodeResultJS) \
- macro(PutToArguments, NodeMustGenerate) \
- \
+ /* Nodes for creating functions. */\
+ macro(NewFunctionNoCheck, NodeResultJS) \
macro(NewFunction, NodeResultJS) \
+ macro(NewFunctionExpression, NodeResultJS) \
\
/* These aren't terminals but always exit */ \
macro(Throw, NodeMustGenerate) \
@@ -313,27 +290,13 @@ namespace JSC { namespace DFG {
/* different compiler. */\
macro(ForceOSRExit, NodeMustGenerate) \
\
- /* Vends a bottom JS value. It is invalid to ever execute this. Useful for cases */\
- /* where we know that we would have exited but we'd like to still track the control */\
- /* flow. */\
- macro(BottomValue, NodeResultJS) \
- \
/* Checks the watchdog timer. If the timer has fired, we OSR exit to the */ \
/* baseline JIT to redo the watchdog timer check, and service the timer. */ \
macro(CheckWatchdogTimer, NodeMustGenerate) \
/* Write barriers ! */\
macro(StoreBarrier, NodeMustGenerate) \
- \
- /* For-in enumeration opcodes */\
- macro(GetEnumerableLength, NodeMustGenerate | NodeResultJS) \
- macro(HasIndexedProperty, NodeResultBoolean) \
- macro(HasStructureProperty, NodeResultBoolean) \
- macro(HasGenericProperty, NodeResultBoolean) \
- macro(GetDirectPname, NodeMustGenerate | NodeHasVarArgs | NodeResultJS) \
- macro(GetPropertyEnumerator, NodeMustGenerate | NodeResultJS) \
- macro(GetEnumeratorStructurePname, NodeMustGenerate | NodeResultJS) \
- macro(GetEnumeratorGenericPname, NodeMustGenerate | NodeResultJS) \
- macro(ToIndexString, NodeResultJS)
+ macro(ConditionalStoreBarrier, NodeMustGenerate) \
+ macro(StoreBarrierWithNullCheck, NodeMustGenerate) \
// This enum generates a monotonically increasing id for all Node types,
// and is used by the subsequent enum to fill out the id (as accessed via the NodeIdMask).
diff --git a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp b/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp
index 8c8419a81..a64963581 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +32,7 @@
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -51,22 +51,29 @@ public:
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
- block->ssa->availabilityAtHead.clear();
- block->ssa->availabilityAtTail.clear();
+ block->ssa->availabilityAtHead.fill(Availability());
+ block->ssa->availabilityAtTail.fill(Availability());
}
BasicBlock* root = m_graph.block(0);
- root->ssa->availabilityAtHead.m_locals.fill(Availability::unavailable());
- for (unsigned argument = m_graph.m_argumentFormats.size(); argument--;) {
- FlushedAt flushedAt = FlushedAt(
- m_graph.m_argumentFormats[argument],
- virtualRegisterForArgument(argument));
- root->ssa->availabilityAtHead.m_locals.argument(argument) = Availability(flushedAt);
+ for (unsigned argument = root->ssa->availabilityAtHead.numberOfArguments(); argument--;) {
+ root->ssa->availabilityAtHead.argument(argument) =
+ Availability::unavailable().withFlush(
+ FlushedAt(FlushedJSValue, virtualRegisterForArgument(argument)));
}
-
- // This could be made more efficient by processing blocks in reverse postorder.
+ for (unsigned local = root->ssa->availabilityAtHead.numberOfLocals(); local--;)
+ root->ssa->availabilityAtHead.local(local) = Availability::unavailable();
- LocalOSRAvailabilityCalculator calculator;
+ if (m_graph.m_plan.mode == FTLForOSREntryMode) {
+ for (unsigned local = m_graph.m_profiledBlock->m_numCalleeRegisters; local--;) {
+ root->ssa->availabilityAtHead.local(local) =
+ Availability::unavailable().withFlush(
+ FlushedAt(FlushedJSValue, virtualRegisterForLocal(local)));
+ }
+ }
+
+ // This could be made more efficient by processing blocks in reverse postorder.
+ Operands<Availability> availability;
bool changed;
do {
changed = false;
@@ -76,22 +83,55 @@ public:
if (!block)
continue;
- calculator.beginBlock(block);
+ availability = block->ssa->availabilityAtHead;
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex)
- calculator.executeNode(block->at(nodeIndex));
+ for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
+ Node* node = block->at(nodeIndex);
+
+ switch (node->op()) {
+ case SetLocal: {
+ VariableAccessData* variable = node->variableAccessData();
+ availability.operand(variable->local()) =
+ Availability(node->child1().node(), variable->flushedAt());
+ break;
+ }
+
+ case GetArgument: {
+ VariableAccessData* variable = node->variableAccessData();
+ availability.operand(variable->local()) =
+ Availability(node, variable->flushedAt());
+ break;
+ }
+
+ case MovHint: {
+ availability.operand(node->unlinkedLocal()) =
+ Availability(node->child1().node());
+ break;
+ }
+
+ case ZombieHint: {
+ availability.operand(node->unlinkedLocal()) =
+ Availability::unavailable();
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
- if (calculator.m_availability == block->ssa->availabilityAtTail)
+ if (availability == block->ssa->availabilityAtTail)
continue;
- block->ssa->availabilityAtTail = calculator.m_availability;
+ block->ssa->availabilityAtTail = availability;
changed = true;
for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
BasicBlock* successor = block->successor(successorIndex);
- successor->ssa->availabilityAtHead.merge(calculator.m_availability);
- successor->ssa->availabilityAtHead.pruneByLiveness(
- m_graph, successor->firstOrigin().forExit);
+ for (unsigned i = availability.size(); i--;) {
+ successor->ssa->availabilityAtHead[i] = availability[i].merge(
+ successor->ssa->availabilityAtHead[i]);
+ }
}
}
} while (changed);
@@ -106,110 +146,6 @@ bool performOSRAvailabilityAnalysis(Graph& graph)
return runPhase<OSRAvailabilityAnalysisPhase>(graph);
}
-LocalOSRAvailabilityCalculator::LocalOSRAvailabilityCalculator()
-{
-}
-
-LocalOSRAvailabilityCalculator::~LocalOSRAvailabilityCalculator()
-{
-}
-
-void LocalOSRAvailabilityCalculator::beginBlock(BasicBlock* block)
-{
- m_availability = block->ssa->availabilityAtHead;
-}
-
-void LocalOSRAvailabilityCalculator::endBlock(BasicBlock* block)
-{
- m_availability = block->ssa->availabilityAtTail;
-}
-
-void LocalOSRAvailabilityCalculator::executeNode(Node* node)
-{
- switch (node->op()) {
- case PutStack: {
- StackAccessData* data = node->stackAccessData();
- m_availability.m_locals.operand(data->local).setFlush(data->flushedAt());
- break;
- }
-
- case KillStack: {
- m_availability.m_locals.operand(node->unlinkedLocal()).setFlush(FlushedAt(ConflictingFlush));
- break;
- }
-
- case GetStack: {
- StackAccessData* data = node->stackAccessData();
- m_availability.m_locals.operand(data->local) = Availability(node, data->flushedAt());
- break;
- }
-
- case MovHint: {
- m_availability.m_locals.operand(node->unlinkedLocal()).setNode(node->child1().node());
- break;
- }
-
- case ZombieHint: {
- m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable();
- break;
- }
-
- case LoadVarargs:
- case ForwardVarargs: {
- LoadVarargsData* data = node->loadVarargsData();
- m_availability.m_locals.operand(data->count) =
- Availability(FlushedAt(FlushedInt32, data->machineCount));
- for (unsigned i = data->limit; i--;) {
- m_availability.m_locals.operand(VirtualRegister(data->start.offset() + i)) =
- Availability(FlushedAt(FlushedJSValue, VirtualRegister(data->machineStart.offset() + i)));
- }
- break;
- }
-
- case PhantomDirectArguments:
- case PhantomClonedArguments: {
- InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
- if (!inlineCallFrame) {
- // We don't need to record anything about how the arguments are to be recovered. It's just a
- // given that we can read them from the stack.
- break;
- }
-
- if (inlineCallFrame->isVarargs()) {
- // Record how to read each argument and the argument count.
- Availability argumentCount =
- m_availability.m_locals.operand(inlineCallFrame->stackOffset + JSStack::ArgumentCount);
-
- m_availability.m_heap.set(PromotedHeapLocation(ArgumentCountPLoc, node), argumentCount);
- }
-
- if (inlineCallFrame->isClosureCall) {
- Availability callee = m_availability.m_locals.operand(
- inlineCallFrame->stackOffset + JSStack::Callee);
- m_availability.m_heap.set(PromotedHeapLocation(ArgumentsCalleePLoc, node), callee);
- }
-
- for (unsigned i = 0; i < inlineCallFrame->arguments.size() - 1; ++i) {
- Availability argument = m_availability.m_locals.operand(
- inlineCallFrame->stackOffset + CallFrame::argumentOffset(i));
-
- m_availability.m_heap.set(PromotedHeapLocation(ArgumentPLoc, node, i), argument);
- }
- break;
- }
-
- case PutHint: {
- m_availability.m_heap.set(
- PromotedHeapLocation(node->child1().node(), node->promotedLocationDescriptor()),
- Availability(node->child2().node()));
- break;
- }
-
- default:
- break;
- }
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h b/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h
index 064bec03c..28bf505da 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,9 +26,10 @@
#ifndef DFGOSRAvailabilityAnalysisPhase_h
#define DFGOSRAvailabilityAnalysisPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include "DFGBasicBlock.h"
#include "DFGCommon.h"
namespace JSC { namespace DFG {
@@ -36,27 +37,10 @@ namespace JSC { namespace DFG {
class Graph;
// Computes BasicBlock::ssa->availabiltiyAtHead/Tail. This is a forward flow type inference
-// over MovHints and SetLocals. This analysis is run directly by the Plan for preparing for
-// lowering to LLVM IR, but it can also be used as a utility. Note that if you run it before
-// stack layout, all of the flush availability will omit the virtual register - but it will
-// tell you the format.
+// over MovHints and SetLocals.
bool performOSRAvailabilityAnalysis(Graph&);
-// Local calculator for figuring out the availability at any node in a basic block. Requires
-// having run the availability analysis.
-class LocalOSRAvailabilityCalculator {
-public:
- LocalOSRAvailabilityCalculator();
- ~LocalOSRAvailabilityCalculator();
-
- void beginBlock(BasicBlock*);
- void endBlock(BasicBlock*); // Useful if you want to get data for the end of the block. You don't need to call this if you did beginBlock() and then executeNode() for every node.
- void executeNode(Node*);
-
- AvailabilityMap m_availability;
-};
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
index 02dbe4fa5..2efb008d0 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,73 +34,17 @@
#include "DFGNode.h"
#include "JIT.h"
#include "JSStackInlines.h"
-#include "JSCInlines.h"
-#include <wtf/CommaPrinter.h>
+#include "Operations.h"
namespace JSC { namespace DFG {
-void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- out.print("bc#", m_bytecodeIndex, ", machine code offset = ", m_machineCodeOffset);
- out.print(", stack rules = [");
-
- auto printOperand = [&] (VirtualRegister reg) {
- out.print(inContext(m_expectedValues.operand(reg), context), " (");
- VirtualRegister toReg;
- bool overwritten = false;
- for (OSREntryReshuffling reshuffling : m_reshufflings) {
- if (reg == VirtualRegister(reshuffling.fromOffset)) {
- toReg = VirtualRegister(reshuffling.toOffset);
- break;
- }
- if (reg == VirtualRegister(reshuffling.toOffset))
- overwritten = true;
- }
- if (!overwritten && !toReg.isValid())
- toReg = reg;
- if (toReg.isValid()) {
- if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
- out.print("ignored");
- else
- out.print("maps to ", toReg);
- } else
- out.print("overwritten");
- if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
- out.print(", forced double");
- if (reg.isLocal() && m_localsForcedMachineInt.get(reg.toLocal()))
- out.print(", forced machine int");
- out.print(")");
- };
-
- CommaPrinter comma;
- for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
- out.print(comma, "arg", argumentIndex, ":");
- printOperand(virtualRegisterForArgument(argumentIndex));
- }
- for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
- out.print(comma, "loc", localIndex, ":");
- printOperand(virtualRegisterForLocal(localIndex));
- }
-
- out.print("], machine stack used = ", m_machineStackUsed);
-}
-
-void OSREntryData::dump(PrintStream& out) const
-{
- dumpInContext(out, nullptr);
-}
-
-SUPPRESS_ASAN
void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
ASSERT(codeBlock->alternative());
ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
ASSERT(!codeBlock->jitCodeMap());
-
- if (!Options::enableOSREntryToDFG())
- return 0;
-
+
if (Options::verboseOSR()) {
dataLog(
"DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
@@ -108,12 +52,6 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
}
VM* vm = &exec->vm();
-
- sanitizeStackForVM(vm);
-
- if (bytecodeIndex)
- codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true);
-
if (codeBlock->jitType() != JITCode::DFGJIT) {
RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT);
@@ -186,7 +124,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
JSValue value;
if (!argument)
- value = exec->thisValue();
+ value = exec->hostThisValue();
else
value = exec->argument(argument - 1);
@@ -203,33 +141,33 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
int localOffset = virtualRegisterForLocal(local).offset();
if (entry->m_localsForcedDouble.get(local)) {
- if (!exec->registers()[localOffset].asanUnsafeJSValue().isNumber()) {
+ if (!exec->registers()[localOffset].jsValue().isNumber()) {
if (Options::verboseOSR()) {
dataLog(
" OSR failed because variable ", localOffset, " is ",
- exec->registers()[localOffset].asanUnsafeJSValue(), ", expected number.\n");
+ exec->registers()[localOffset].jsValue(), ", expected number.\n");
}
return 0;
}
continue;
}
if (entry->m_localsForcedMachineInt.get(local)) {
- if (!exec->registers()[localOffset].asanUnsafeJSValue().isMachineInt()) {
+ if (!exec->registers()[localOffset].jsValue().isMachineInt()) {
if (Options::verboseOSR()) {
dataLog(
" OSR failed because variable ", localOffset, " is ",
- exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ",
+ exec->registers()[localOffset].jsValue(), ", expected ",
"machine int.\n");
}
return 0;
}
continue;
}
- if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].asanUnsafeJSValue())) {
+ if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].jsValue())) {
if (Options::verboseOSR()) {
dataLog(
" OSR failed because variable ", localOffset, " is ",
- exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ",
+ exec->registers()[localOffset].jsValue(), ", expected ",
entry->m_expectedValues.local(local), ".\n");
}
return 0;
@@ -243,8 +181,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
// it seems silly: you'd be diverting the program to error handling when it
// would have otherwise just kept running albeit less quickly.
- unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
- if (!vm->interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()])) {
+ if (!vm->interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(jitCode->common.requiredRegisterCountForExecutionAndExit()).offset()])) {
if (Options::verboseOSR())
dataLogF(" OSR failed because stack growth failed.\n");
return 0;
@@ -252,70 +189,36 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
if (Options::verboseOSR())
dataLogF(" OSR should succeed.\n");
-
- // At this point we're committed to entering. We will do some work to set things up,
- // but we also rely on our caller recognizing that when we return a non-null pointer,
- // that means that we're already past the point of no return and we must succeed at
- // entering.
-
- // 3) Set up the data in the scratch buffer and perform data format conversions.
-
- unsigned frameSize = jitCode->common.frameRegisterCount;
- unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
- unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);
-
- Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + JSStack::CallFrameHeaderSize + maxFrameSize))->dataBuffer());
-
- *bitwise_cast<size_t*>(scratch + 0) = frameSize;
- void* targetPC = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset);
- if (Options::verboseOSR())
- dataLogF(" OSR using target PC %p.\n", targetPC);
- RELEASE_ASSERT(targetPC);
- *bitwise_cast<void**>(scratch + 1) = targetPC;
-
- Register* pivot = scratch + 2 + JSStack::CallFrameHeaderSize;
-
- for (int index = -JSStack::CallFrameHeaderSize; index < static_cast<int>(baselineFrameSize); ++index) {
- VirtualRegister reg(-1 - index);
-
- if (reg.isLocal()) {
- if (entry->m_localsForcedDouble.get(reg.toLocal())) {
- *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asNumber();
- continue;
- }
-
- if (entry->m_localsForcedMachineInt.get(reg.toLocal())) {
- *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asMachineInt() << JSValue::int52ShiftAmount;
- continue;
- }
- }
-
- pivot[index] = exec->registers()[reg.offset()].asanUnsafeJSValue();
+ // 3) Perform data format conversions.
+ for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
+ if (entry->m_localsForcedDouble.get(local))
+ *bitwise_cast<double*>(exec->registers() + virtualRegisterForLocal(local).offset()) = exec->registers()[virtualRegisterForLocal(local).offset()].jsValue().asNumber();
+ if (entry->m_localsForcedMachineInt.get(local))
+ *bitwise_cast<int64_t*>(exec->registers() + virtualRegisterForLocal(local).offset()) = exec->registers()[virtualRegisterForLocal(local).offset()].jsValue().asMachineInt() << JSValue::int52ShiftAmount;
}
// 4) Reshuffle those registers that need reshuffling.
- Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
+
+ Vector<EncodedJSValue> temporaryLocals(entry->m_reshufflings.size());
+ EncodedJSValue* registers = bitwise_cast<EncodedJSValue*>(exec->registers());
for (unsigned i = entry->m_reshufflings.size(); i--;)
- temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue();
+ temporaryLocals[i] = registers[entry->m_reshufflings[i].fromOffset];
for (unsigned i = entry->m_reshufflings.size(); i--;)
- pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];
+ registers[entry->m_reshufflings[i].toOffset] = temporaryLocals[i];
- // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
- // some programs by eliminating some stale pointer pathologies.
- for (unsigned i = frameSize; i--;) {
- if (entry->m_machineStackUsed.get(i))
- continue;
- pivot[i] = JSValue();
- }
+ // 5) Fix the call frame.
- // 6) Fix the call frame to have the right code block.
+ exec->setCodeBlock(codeBlock);
- *bitwise_cast<CodeBlock**>(pivot - 1 - JSStack::CodeBlock) = codeBlock;
+ // 6) Find and return the destination machine code address.
+
+ void* result = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset);
if (Options::verboseOSR())
- dataLogF(" OSR returning data buffer %p.\n", scratch);
- return scratch;
+ dataLogF(" OSR returning machine code address %p.\n", result);
+
+ return result;
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.h b/Source/JavaScriptCore/dfg/DFGOSREntry.h
index 04aaabfee..edca84bff 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntry.h
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -59,10 +59,6 @@ struct OSREntryData {
BitVector m_localsForcedDouble;
BitVector m_localsForcedMachineInt;
Vector<OSREntryReshuffling> m_reshufflings;
- BitVector m_machineStackUsed;
-
- void dumpInContext(PrintStream&, DumpContext*) const;
- void dump(PrintStream&) const;
};
inline unsigned getOSREntryDataBytecodeIndex(OSREntryData* osrEntryData)
@@ -70,8 +66,6 @@ inline unsigned getOSREntryDataBytecodeIndex(OSREntryData* osrEntryData)
return osrEntryData->m_bytecodeIndex;
}
-// Returns a pointer to a data buffer that the OSR entry thunk will recognize and
-// parse. If this returns null, it means
void* prepareOSREntry(ExecState*, CodeBlock*, unsigned bytecodeIndex);
#else
inline void* prepareOSREntry(ExecState*, CodeBlock*, unsigned) { return 0; }
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
index 29cc66a53..4f82d15fa 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,7 +33,7 @@
#include "DFGGraph.h"
#include "DFGLoopPreHeaderCreationPhase.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -63,12 +63,9 @@ public:
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
- unsigned nodeIndex = 0;
Node* firstNode = block->at(0);
- while (firstNode->isSemanticallySkippable())
- firstNode = block->at(++nodeIndex);
if (firstNode->op() == LoopHint
- && firstNode->origin.semantic == CodeOrigin(bytecodeIndex)) {
+ && firstNode->codeOrigin == CodeOrigin(bytecodeIndex)) {
target = block;
break;
}
@@ -83,14 +80,20 @@ public:
BlockInsertionSet insertionSet(m_graph);
- // We say that the execution count of the entry block is 1, because we know for sure
- // that this must be the case. Under our definition of executionCount, "1" means "once
- // per invocation". We could have said NaN here, since that would ask any clients of
- // executionCount to use best judgement - but that seems unnecessary since we know for
- // sure what the executionCount should be in this case.
- BasicBlock* newRoot = insertionSet.insert(0, 1);
- NodeOrigin origin = target->at(0)->origin;
+ BasicBlock* newRoot = insertionSet.insert(0);
+ CodeOrigin codeOrigin = target->at(0)->codeOrigin;
+ for (int argument = 0; argument < baseline->numParameters(); ++argument) {
+ Node* oldNode = target->variablesAtHead.argument(argument);
+ if (!oldNode) {
+ // Just for sanity, always have a SetArgument even if it's not needed.
+ oldNode = m_graph.m_arguments[argument];
+ }
+ Node* node = newRoot->appendNode(
+ m_graph, SpecNone, SetArgument, codeOrigin,
+ OpInfo(oldNode->variableAccessData()));
+ m_graph.m_arguments[argument] = node;
+ }
Vector<Node*> locals(baseline->m_numCalleeRegisters);
for (int local = 0; local < baseline->m_numCalleeRegisters; ++local) {
Node* previousHead = target->variablesAtHead.local(local);
@@ -98,26 +101,13 @@ public:
continue;
VariableAccessData* variable = previousHead->variableAccessData();
locals[local] = newRoot->appendNode(
- m_graph, variable->prediction(), ExtractOSREntryLocal, origin,
+ m_graph, variable->prediction(), ExtractOSREntryLocal, codeOrigin,
OpInfo(variable->local().offset()));
newRoot->appendNode(
- m_graph, SpecNone, MovHint, origin, OpInfo(variable->local().offset()),
+ m_graph, SpecNone, MovHint, codeOrigin, OpInfo(variable->local().offset()),
Edge(locals[local]));
}
-
- for (int argument = 0; argument < baseline->numParameters(); ++argument) {
- Node* oldNode = target->variablesAtHead.argument(argument);
- if (!oldNode) {
- // Just for sanity, always have a SetArgument even if it's not needed.
- oldNode = m_graph.m_arguments[argument];
- }
- Node* node = newRoot->appendNode(
- m_graph, SpecNone, SetArgument, origin,
- OpInfo(oldNode->variableAccessData()));
- m_graph.m_arguments[argument] = node;
- }
-
for (int local = 0; local < baseline->m_numCalleeRegisters; ++local) {
Node* previousHead = target->variablesAtHead.local(local);
if (!previousHead)
@@ -125,11 +115,11 @@ public:
VariableAccessData* variable = previousHead->variableAccessData();
Node* node = locals[local];
newRoot->appendNode(
- m_graph, SpecNone, SetLocal, origin, OpInfo(variable), Edge(node));
+ m_graph, SpecNone, SetLocal, codeOrigin, OpInfo(variable), Edge(node));
}
newRoot->appendNode(
- m_graph, SpecNone, Jump, origin,
+ m_graph, SpecNone, Jump, codeOrigin,
OpInfo(createPreHeader(m_graph, insertionSet, target)));
insertionSet.execute();
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h
index 2b9beba47..a76372126 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGOSREntrypointCreationPhase_h
#define DFGOSREntrypointCreationPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index 8b4d67ace..538a85a01 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -31,7 +31,7 @@
#include "AssemblyHelpers.h"
#include "DFGGraph.h"
#include "DFGSpeculativeJIT.h"
-#include "JSCInlines.h"
+#include "JSCellInlines.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h
index d336d0443..d40efe4e0 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h
@@ -26,12 +26,15 @@
#ifndef DFGOSRExit_h
#define DFGOSRExit_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "CodeOrigin.h"
#include "DFGCommon.h"
#include "DFGExitProfile.h"
#include "DFGOSRExitBase.h"
+#include "DFGValueRecoveryOverride.h"
#include "GPRInfo.h"
#include "MacroAssembler.h"
#include "MethodOfGettingAValueProfile.h"
@@ -101,16 +104,11 @@ struct OSRExit : public OSRExitBase {
unsigned m_streamIndex;
- void considerAddingAsFrequentExitSite(CodeBlock* profiledCodeBlock)
- {
- OSRExitBase::considerAddingAsFrequentExitSite(profiledCodeBlock, ExitFromDFG);
- }
+ RefPtr<ValueRecoveryOverride> m_valueRecoveryOverride;
};
struct SpeculationFailureDebugInfo {
CodeBlock* codeBlock;
- ExitKind kind;
- unsigned bytecodeOffset;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp
index afc50e83b..ebfd27f2e 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp
@@ -31,17 +31,19 @@
#include "CodeBlock.h"
#include "DFGBasicBlock.h"
#include "DFGNode.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
-void OSRExitBase::considerAddingAsFrequentExitSiteSlow(CodeBlock* profiledCodeBlock, ExitingJITType jitType)
+bool OSRExitBase::considerAddingAsFrequentExitSiteSlow(CodeBlock* profiledCodeBlock)
{
CodeBlock* sourceProfiledCodeBlock =
baselineCodeBlockForOriginAndBaselineCodeBlock(
m_codeOriginForExitProfile, profiledCodeBlock);
- if (sourceProfiledCodeBlock)
- sourceProfiledCodeBlock->addFrequentExitSite(FrequentExitSite(m_codeOriginForExitProfile.bytecodeIndex, m_kind, jitType));
+ if (!sourceProfiledCodeBlock)
+ return false;
+ return sourceProfiledCodeBlock->addFrequentExitSite(
+ FrequentExitSite(m_codeOriginForExitProfile.bytecodeIndex, m_kind));
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitBase.h b/Source/JavaScriptCore/dfg/DFGOSRExitBase.h
index 099b2d522..ee1d69de7 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitBase.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitBase.h
@@ -26,6 +26,8 @@
#ifndef DFGOSRExitBase_h
#define DFGOSRExitBase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "CodeOrigin.h"
@@ -56,15 +58,15 @@ struct OSRExitBase {
CodeOrigin m_codeOrigin;
CodeOrigin m_codeOriginForExitProfile;
-protected:
- void considerAddingAsFrequentExitSite(CodeBlock* profiledCodeBlock, ExitingJITType jitType)
+ bool considerAddingAsFrequentExitSite(CodeBlock* profiledCodeBlock)
{
- if (m_count)
- considerAddingAsFrequentExitSiteSlow(profiledCodeBlock, jitType);
+ if (!m_count)
+ return false;
+ return considerAddingAsFrequentExitSiteSlow(profiledCodeBlock);
}
private:
- void considerAddingAsFrequentExitSiteSlow(CodeBlock* profiledCodeBlock, ExitingJITType);
+ bool considerAddingAsFrequentExitSiteSlow(CodeBlock* profiledCodeBlock);
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h
index 75f1d21c4..9eeb4532d 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h
@@ -26,6 +26,8 @@
#ifndef DFGOSRExitCompilationInfo_h
#define DFGOSRExitCompilationInfo_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "CodeOrigin.h"
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
index 23d51c68e..f8c9fb067 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,81 +34,12 @@
#include "DFGOSRExitPreparation.h"
#include "LinkBuffer.h"
#include "OperandsInlines.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "RepatchBuffer.h"
#include <wtf/StringPrintStream.h>
namespace JSC { namespace DFG {
-void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands)
-{
- HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
- for (size_t index = 0; index < operands.size(); ++index) {
- const ValueRecovery& recovery = operands[index];
- int operand = operands.operandForIndex(index);
-
- if (recovery.technique() != DirectArgumentsThatWereNotCreated
- && recovery.technique() != ClonedArgumentsThatWereNotCreated)
- continue;
-
- MinifiedID id = recovery.nodeID();
- auto iter = alreadyAllocatedArguments.find(id);
- if (iter != alreadyAllocatedArguments.end()) {
- JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
- m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
- m_jit.storeValue(regs, CCallHelpers::addressFor(operand));
- continue;
- }
-
- InlineCallFrame* inlineCallFrame =
- m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();
-
- int stackOffset;
- if (inlineCallFrame)
- stackOffset = inlineCallFrame->stackOffset;
- else
- stackOffset = 0;
-
- if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
- m_jit.loadPtr(
- AssemblyHelpers::addressFor(stackOffset + JSStack::Callee),
- GPRInfo::regT0);
- } else {
- m_jit.move(
- AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
- GPRInfo::regT0);
- }
-
- if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
- m_jit.load32(
- AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount),
- GPRInfo::regT1);
- } else {
- m_jit.move(
- AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
- GPRInfo::regT1);
- }
-
- m_jit.setupArgumentsWithExecState(
- AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
- switch (recovery.technique()) {
- case DirectArgumentsThatWereNotCreated:
- m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
- break;
- case ClonedArgumentsThatWereNotCreated:
- m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- m_jit.call(GPRInfo::nonArgGPR0);
- m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
-
- alreadyAllocatedArguments.add(id, operand);
- }
-}
-
extern "C" {
void compileOSRExit(ExecState* exec)
@@ -135,6 +66,12 @@ void compileOSRExit(ExecState* exec)
Operands<ValueRecovery> operands;
codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
+ // There may be an override, for forward speculations.
+ if (!!exit.m_valueRecoveryOverride) {
+ operands.setOperand(
+ exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
+ }
+
SpeculationRecovery* recovery = 0;
if (exit.m_recoveryIndex != UINT_MAX)
recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];
@@ -151,15 +88,15 @@ void compileOSRExit(ExecState* exec)
Profiler::OSRExit* profilerExit = compilation->addOSRExit(
exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
- exit.m_kind, exit.m_kind == UncountableInvalidation);
+ exit.m_kind, isWatchpoint(exit.m_kind));
jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
}
exitCompiler.compileExit(exit, operands, recovery);
- LinkBuffer patchBuffer(*vm, jit, codeBlock);
+ LinkBuffer patchBuffer(*vm, &jit, codeBlock);
exit.m_code = FINALIZE_CODE_IF(
- shouldShowDisassembly() || Options::verboseOSR(),
+ shouldShowDisassembly(),
patchBuffer,
("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
exitIndex, toCString(exit.m_codeOrigin).data(),
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
index cb262d427..cbaafcc1e 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGOSRExitCompiler_h
#define DFGOSRExitCompiler_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "AssemblyHelpers.h"
@@ -49,9 +51,28 @@ public:
void compileExit(const OSRExit&, const Operands<ValueRecovery>&, SpeculationRecovery*);
private:
- void emitRestoreArguments(const Operands<ValueRecovery>&);
+#if !ASSERT_DISABLED
+ static unsigned badIndex() { return static_cast<unsigned>(-1); };
+#endif
+
+ void initializePoisoned(unsigned size)
+ {
+#if ASSERT_DISABLED
+ m_poisonScratchIndices.resize(size);
+#else
+ m_poisonScratchIndices.fill(badIndex(), size);
+#endif
+ }
+
+ unsigned poisonIndex(unsigned index)
+ {
+ unsigned result = m_poisonScratchIndices[index];
+ ASSERT(result != badIndex());
+ return result;
+ }
CCallHelpers& m_jit;
+ Vector<unsigned> m_poisonScratchIndices;
};
extern "C" {
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index 0851a58cf..9402d115e 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,25 +31,23 @@
#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGSpeculativeJIT.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include <wtf/DataLog.h>
namespace JSC { namespace DFG {
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
- // Pro-forma stuff.
+ // 1) Pro-forma stuff.
if (Options::printEachOSRExit()) {
SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
debugInfo->codeBlock = m_jit.codeBlock();
- debugInfo->kind = exit.m_kind;
- debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
}
- // Perform speculation recovery. This only comes into play when an operation
- // starts mutating state before verifying the speculation it has already made.
+ // 2) Perform speculation recovery. This only comes into play when an operation
+ // starts mutating state before verifying the speculation it has already made.
if (recovery) {
switch (recovery->type()) {
@@ -65,7 +63,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // Refine some value profile, if appropriate.
+ // 3) Refine some value profile, if appropriate.
if (!!exit.m_jsValueSource) {
if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
@@ -102,8 +100,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
+#if CPU(ARM64)
+ m_jit.pushToSave(scratch1);
+ m_jit.pushToSave(scratch2);
+#else
m_jit.push(scratch1);
m_jit.push(scratch2);
+#endif
GPRReg value;
if (exit.m_jsValueSource.isAddress()) {
@@ -112,15 +115,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
} else
value = exit.m_jsValueSource.payloadGPR();
- m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
- m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
+ m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+ m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
m_jit.lshift32(scratch1, scratch2);
m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+#if CPU(ARM64)
+ m_jit.popToRestore(scratch2);
+ m_jit.popToRestore(scratch1);
+#else
m_jit.pop(scratch2);
m_jit.pop(scratch1);
+#endif
}
}
@@ -131,14 +139,22 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Save a register so we can use it.
GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
+#if CPU(ARM64)
+ m_jit.pushToSave(scratch);
+#else
m_jit.push(scratch);
+#endif
m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+#if CPU(ARM64)
+ m_jit.popToRestore(scratch);
+#else
m_jit.pop(scratch);
+#endif
} else if (exit.m_jsValueSource.hasKnownTag()) {
m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
@@ -152,7 +168,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how and wny we
// do this simple approach.
- // Save all state from GPRs into the scratch buffer.
+ // 4) Save all state from GPRs into the scratch buffer.
ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
@@ -185,7 +201,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Now all GPRs are free to reuse.
- // Save all state from FPRs into the scratch buffer.
+ // 5) Save all state from FPRs into the scratch buffer.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -203,9 +219,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Now all FPRs are free to reuse.
- // Save all state from the stack into the scratch buffer. For simplicity we
- // do this even for state that's already in the right place on the stack.
- // It makes things simpler later.
+ // 6) Save all state from the stack into the scratch buffer. For simplicity we
+ // do this even for state that's already in the right place on the stack.
+ // It makes things simpler later.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -235,15 +251,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
- // could toast some stack that the DFG used. We need to do it before storing to stack offsets
- // used by baseline.
- m_jit.addPtr(
- CCallHelpers::TrustedImm32(
- -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
- CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
+ // 7) Do all data format conversions and store the results into the stack.
- // Do all data format conversions and store the results into the stack.
+ bool haveArguments = false;
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -251,7 +261,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery.technique()) {
case InPair:
+ case InFPR:
case DisplacedInJSStack:
+ case DoubleDisplacedInJSStack:
m_jit.load32(
&bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
GPRInfo::regT0);
@@ -266,14 +278,6 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
AssemblyHelpers::payloadFor(operand));
break;
- case InFPR:
- case DoubleDisplacedInJSStack:
- m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
- m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
- m_jit.purifyNaN(FPRInfo::fpRegT0);
- m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
- break;
-
case UnboxedInt32InGPR:
case Int32DisplacedInJSStack:
m_jit.load32(
@@ -322,9 +326,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
AssemblyHelpers::payloadFor(operand));
break;
- case DirectArgumentsThatWereNotCreated:
- case ClonedArgumentsThatWereNotCreated:
- // Don't do this, yet.
+ case ArgumentsThatWereNotCreated:
+ haveArguments = true;
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue().tag()),
+ AssemblyHelpers::tagFor(operand));
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue().payload()),
+ AssemblyHelpers::payloadFor(operand));
break;
default:
@@ -332,57 +341,127 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
- // recoveries don't recursively refer to each other. But, we don't try to assume that they only
- // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible.
- // Note that we also roughly assume that the arguments might still be materialized outside of its
- // inline call frame scope - but for now the DFG wouldn't do that.
-
- emitRestoreArguments(operands);
-
- // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
- // that all new calls into this code will go to the new JIT, so the execute
- // counter only affects call frames that performed OSR exit and call frames
- // that were still executing the old JIT at the time of another call frame's
- // OSR exit. We want to ensure that the following is true:
+ // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
//
- // (a) Code the performs an OSR exit gets a chance to reenter optimized
- // code eventually, since optimized code is faster. But we don't
- // want to do such reentery too aggressively (see (c) below).
+ // (a) Code the performs an OSR exit gets a chance to reenter optimized
+ // code eventually, since optimized code is faster. But we don't
+ // want to do such reentery too aggressively (see (c) below).
//
- // (b) If there is code on the call stack that is still running the old
- // JIT's code and has never OSR'd, then it should get a chance to
- // perform OSR entry despite the fact that we've exited.
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
//
- // (c) Code the performs an OSR exit should not immediately retry OSR
- // entry, since both forms of OSR are expensive. OSR entry is
- // particularly expensive.
+ // (c) Code the performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
//
- // (d) Frequent OSR failures, even those that do not result in the code
- // running in a hot loop, result in recompilation getting triggered.
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
//
- // To ensure (c), we'd like to set the execute counter to
- // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
- // (a) and (b), since then every OSR exit would delay the opportunity for
- // every call frame to perform OSR entry. Essentially, if OSR exit happens
- // frequently and the function has few loops, then the counter will never
- // become non-negative and OSR entry will never be triggered. OSR entry
- // will only happen if a loop gets hot in the old JIT, which does a pretty
- // good job of ensuring (a) and (b). But that doesn't take care of (d),
- // since each speculation failure would reset the execute counter.
- // So we check here if the number of speculation failures is significantly
- // larger than the number of successes (we want 90% success rate), and if
- // there have been a large enough number of failures. If so, we set the
- // counter to 0; otherwise we set the counter to
- // counterValueForOptimizeAfterWarmUp().
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
handleExitCounts(m_jit, exit);
- // Reify inlined call frames.
+ // 9) Reify inlined call frames.
reifyInlinedCallFrames(m_jit, exit);
- // And finish.
+ // 10) Create arguments if necessary and place them into the appropriate aliased
+ // registers.
+
+ if (haveArguments) {
+ HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
+ NullableHashTraits<InlineCallFrame*>> didCreateArgumentsObject;
+
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+ if (recovery.technique() != ArgumentsThatWereNotCreated)
+ continue;
+ int operand = operands.operandForIndex(index);
+ // Find the right inline call frame.
+ InlineCallFrame* inlineCallFrame = 0;
+ for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
+ current;
+ current = current->caller.inlineCallFrame) {
+ if (current->stackOffset >= operand) {
+ inlineCallFrame = current;
+ break;
+ }
+ }
+
+ if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
+ continue;
+ VirtualRegister argumentsRegister = m_jit.baselineArgumentsRegisterFor(inlineCallFrame);
+ if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
+ // We know this call frame optimized out an arguments object that
+ // the baseline JIT would have created. Do that creation now.
+ if (inlineCallFrame) {
+ m_jit.setupArgumentsWithExecState(
+ AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateInlinedArguments)),
+ GPRInfo::nonArgGPR0);
+ } else {
+ m_jit.setupArgumentsExecState();
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateArguments)),
+ GPRInfo::nonArgGPR0);
+ }
+ m_jit.call(GPRInfo::nonArgGPR0);
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(argumentsRegister));
+ m_jit.store32(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::payloadFor(argumentsRegister));
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.store32(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
+ }
+
+ m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(operand));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
+ }
+ }
+
+#if ENABLE(GGC)
+ // 11) Write barrier the owner executable because we're jumping into a different block.
+ for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0);
+ SpeculativeJIT::osrWriteBarrier(m_jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
+ if (!codeOrigin.inlineCallFrame)
+ break;
+ }
+#endif
+
+ // 12) And finish.
adjustAndJumpToTarget(m_jit, exit);
}
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 5bb0a4f50..219a5e68a 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGSpeculativeJIT.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "VirtualRegister.h"
#include <wtf/DataLog.h>
@@ -40,20 +40,16 @@ namespace JSC { namespace DFG {
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
- m_jit.jitAssertTagsInPlace();
-
- // Pro-forma stuff.
+ // 1) Pro-forma stuff.
if (Options::printEachOSRExit()) {
SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
debugInfo->codeBlock = m_jit.codeBlock();
- debugInfo->kind = exit.m_kind;
- debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
}
- // Perform speculation recovery. This only comes into play when an operation
- // starts mutating state before verifying the speculation it has already made.
+ // 2) Perform speculation recovery. This only comes into play when an operation
+ // starts mutating state before verifying the speculation it has already made.
if (recovery) {
switch (recovery->type()) {
@@ -71,7 +67,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // Refine some array and/or value profile, if appropriate.
+ // 3) Refine some array and/or value profile, if appropriate.
if (!!exit.m_jsValueSource) {
if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
@@ -97,13 +93,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
- if (isARM64()) {
- m_jit.pushToSave(scratch1);
- m_jit.pushToSave(scratch2);
- } else {
- m_jit.push(scratch1);
- m_jit.push(scratch2);
- }
+#if CPU(ARM64)
+ m_jit.pushToSave(scratch1);
+ m_jit.pushToSave(scratch2);
+#else
+ m_jit.push(scratch1);
+ m_jit.push(scratch2);
+#endif
GPRReg value;
if (exit.m_jsValueSource.isAddress()) {
@@ -112,20 +108,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
} else
value = exit.m_jsValueSource.gpr();
- m_jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
- m_jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID());
- m_jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeOffset()), scratch1);
+ m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+ m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
+ m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
m_jit.lshift32(scratch1, scratch2);
m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
- if (isARM64()) {
- m_jit.popToRestore(scratch2);
- m_jit.popToRestore(scratch1);
- } else {
- m_jit.pop(scratch2);
- m_jit.pop(scratch1);
- }
+#if CPU(ARM64)
+ m_jit.popToRestore(scratch2);
+ m_jit.popToRestore(scratch1);
+#else
+ m_jit.pop(scratch2);
+ m_jit.pop(scratch1);
+#endif
}
}
@@ -179,7 +175,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// variable" from "how was it represented", which will make it more difficult to add
// features in the future and it will make it harder to reason about bugs.
- // Save all state from GPRs into the scratch buffer.
+ // 4) Save all state from GPRs into the scratch buffer.
ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
@@ -203,7 +199,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// And voila, all GPRs are free to reuse.
- // Save all state from FPRs into the scratch buffer.
+ // 5) Save all state from FPRs into the scratch buffer.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -211,7 +207,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery.technique()) {
case InFPR:
m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
- m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
+ m_jit.storeDouble(recovery.fpr(), GPRInfo::regT0);
break;
default:
@@ -221,9 +217,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Now, all FPRs are also free.
- // Save all state from the stack into the scratch buffer. For simplicity we
- // do this even for state that's already in the right place on the stack.
- // It makes things simpler later.
+ // 6) Save all state from the stack into the scratch buffer. For simplicity we
+ // do this even for state that's already in the right place on the stack.
+ // It makes things simpler later.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -245,15 +241,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
- // could toast some stack that the DFG used. We need to do it before storing to stack offsets
- // used by baseline.
- m_jit.addPtr(
- CCallHelpers::TrustedImm32(
- -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
- CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
+ // 7) Do all data format conversions and store the results into the stack.
- // Do all data format conversions and store the results into the stack.
+ bool haveArguments = false;
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -296,8 +286,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case InFPR:
case DoubleDisplacedInJSStack:
m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
- m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
- m_jit.purifyNaN(FPRInfo::fpRegT0);
+ m_jit.loadDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
break;
@@ -308,68 +297,125 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
AssemblyHelpers::addressFor(operand));
break;
- case DirectArgumentsThatWereNotCreated:
- case ClonedArgumentsThatWereNotCreated:
- // Don't do this, yet.
+ case ArgumentsThatWereNotCreated:
+ haveArguments = true;
+ // We can't restore this yet but we can make sure that the stack appears
+ // sane.
+ m_jit.store64(
+ AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue())),
+ AssemblyHelpers::addressFor(operand));
break;
default:
- RELEASE_ASSERT_NOT_REACHED();
break;
}
}
- // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
- // recoveries don't recursively refer to each other. But, we don't try to assume that they only
- // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible.
- // Note that we also roughly assume that the arguments might still be materialized outside of its
- // inline call frame scope - but for now the DFG wouldn't do that.
-
- emitRestoreArguments(operands);
-
- // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
- // that all new calls into this code will go to the new JIT, so the execute
- // counter only affects call frames that performed OSR exit and call frames
- // that were still executing the old JIT at the time of another call frame's
- // OSR exit. We want to ensure that the following is true:
+ // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
//
- // (a) Code the performs an OSR exit gets a chance to reenter optimized
- // code eventually, since optimized code is faster. But we don't
- // want to do such reentery too aggressively (see (c) below).
+ // (a) Code the performs an OSR exit gets a chance to reenter optimized
+ // code eventually, since optimized code is faster. But we don't
+ // want to do such reentery too aggressively (see (c) below).
//
- // (b) If there is code on the call stack that is still running the old
- // JIT's code and has never OSR'd, then it should get a chance to
- // perform OSR entry despite the fact that we've exited.
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
//
- // (c) Code the performs an OSR exit should not immediately retry OSR
- // entry, since both forms of OSR are expensive. OSR entry is
- // particularly expensive.
+ // (c) Code the performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
//
- // (d) Frequent OSR failures, even those that do not result in the code
- // running in a hot loop, result in recompilation getting triggered.
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
//
- // To ensure (c), we'd like to set the execute counter to
- // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
- // (a) and (b), since then every OSR exit would delay the opportunity for
- // every call frame to perform OSR entry. Essentially, if OSR exit happens
- // frequently and the function has few loops, then the counter will never
- // become non-negative and OSR entry will never be triggered. OSR entry
- // will only happen if a loop gets hot in the old JIT, which does a pretty
- // good job of ensuring (a) and (b). But that doesn't take care of (d),
- // since each speculation failure would reset the execute counter.
- // So we check here if the number of speculation failures is significantly
- // larger than the number of successes (we want 90% success rate), and if
- // there have been a large enough number of failures. If so, we set the
- // counter to 0; otherwise we set the counter to
- // counterValueForOptimizeAfterWarmUp().
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
handleExitCounts(m_jit, exit);
- // Reify inlined call frames.
+ // 9) Reify inlined call frames.
reifyInlinedCallFrames(m_jit, exit);
- // And finish.
+ // 10) Create arguments if necessary and place them into the appropriate aliased
+ // registers.
+
+ if (haveArguments) {
+ HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
+ NullableHashTraits<InlineCallFrame*>> didCreateArgumentsObject;
+
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+ if (recovery.technique() != ArgumentsThatWereNotCreated)
+ continue;
+ int operand = operands.operandForIndex(index);
+ // Find the right inline call frame.
+ InlineCallFrame* inlineCallFrame = 0;
+ for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
+ current;
+ current = current->caller.inlineCallFrame) {
+ if (current->stackOffset >= operand) {
+ inlineCallFrame = current;
+ break;
+ }
+ }
+
+ if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
+ continue;
+ VirtualRegister argumentsRegister = m_jit.baselineArgumentsRegisterFor(inlineCallFrame);
+ if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
+ // We know this call frame optimized out an arguments object that
+ // the baseline JIT would have created. Do that creation now.
+ if (inlineCallFrame) {
+ m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
+ m_jit.setupArguments(GPRInfo::regT0);
+ } else
+ m_jit.setupArgumentsExecState();
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateArguments)),
+ GPRInfo::nonArgGPR0);
+ m_jit.call(GPRInfo::nonArgGPR0);
+ m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
+ m_jit.store64(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
+ }
+
+ m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ }
+ }
+
+#if ENABLE(GGC)
+ // 11) Write barrier the owner executable because we're jumping into a different block.
+ for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0);
+ SpeculativeJIT::osrWriteBarrier(m_jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
+ if (!codeOrigin.inlineCallFrame)
+ break;
+ }
+#endif
+
+ // 12) And finish.
adjustAndJumpToTarget(m_jit, exit);
}
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
index 39b5bb5fc..9f84a2968 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,11 +28,10 @@
#if ENABLE(DFG_JIT)
-#include "DFGJITCode.h"
+#include "Arguments.h"
#include "DFGOperations.h"
-#include "JIT.h"
#include "JSCJSValueInlines.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -53,55 +52,20 @@ void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
AssemblyHelpers::GreaterThanOrEqual,
AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
AssemblyHelpers::TrustedImm32(0));
-
- // We want to figure out if there's a possibility that we're in a loop. For the outermost
- // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
- // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
- // problem is the inlined functions, which might also have loops, but whose baseline versions
- // don't know where to look for the exit count. Figure out if those loops are severe enough
- // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
- // Otherwise, we should use the normal reoptimization trigger.
-
- AssemblyHelpers::JumpList loopThreshold;
-
- for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
- loopThreshold.append(
- jit.branchTest8(
- AssemblyHelpers::NonZero,
- AssemblyHelpers::AbsoluteAddress(
- inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
- }
-
- jit.move(
- AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
- GPRInfo::regT1);
-
- if (!loopThreshold.empty()) {
- AssemblyHelpers::Jump done = jit.jump();
-
- loopThreshold.link(&jit);
- jit.move(
- AssemblyHelpers::TrustedImm32(
- jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
- GPRInfo::regT1);
- done.link(&jit);
- }
-
- tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
+ tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));
reoptimizeNow.link(&jit);
// Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
jit.poke(GPRInfo::regT0);
- jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
- jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
+ ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
#endif
- jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
+ jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
+ jit.call(GPRInfo::regT1);
AssemblyHelpers::Jump doneAdjusting = jit.jump();
tooFewFails.link(&jit);
@@ -110,26 +74,13 @@ void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
int32_t activeThreshold =
jit.baselineCodeBlock()->adjustedCounterValue(
Options::thresholdForOptimizeAfterLongWarmUp());
- int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
+ int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
activeThreshold, jit.baselineCodeBlock());
- int32_t clippedValue;
- switch (jit.codeBlock()->jitType()) {
- case JITCode::DFGJIT:
- clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
- break;
- case JITCode::FTLJIT:
- clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
- clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
-#endif
- break;
- }
+ int32_t clippedValue =
+ ExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
- jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
+ jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
doneAdjusting.link(&jit);
}
@@ -144,46 +95,10 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
- void* jumpTarget = nullptr;
- void* trueReturnPC = nullptr;
-
unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
+ CallLinkInfo& callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfo(callBytecodeIndex);
- switch (inlineCallFrame->kind) {
- case InlineCallFrame::Call:
- case InlineCallFrame::Construct:
- case InlineCallFrame::CallVarargs:
- case InlineCallFrame::ConstructVarargs: {
- CallLinkInfo* callLinkInfo =
- baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
- RELEASE_ASSERT(callLinkInfo);
-
- jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
- break;
- }
-
- case InlineCallFrame::GetterCall:
- case InlineCallFrame::SetterCall: {
- StructureStubInfo* stubInfo =
- baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
- RELEASE_ASSERT(stubInfo);
-
- switch (inlineCallFrame->kind) {
- case InlineCallFrame::GetterCall:
- jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
- break;
- case InlineCallFrame::SetterCall:
- jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
- stubInfo->patch.deltaCallToDone).executableAddress();
- break;
- } }
+ void* jumpTarget = callLinkInfo.callReturnLocation.executableAddress();
GPRReg callerFrameGPR;
if (inlineCallFrame->caller.inlineCallFrame) {
@@ -192,28 +107,47 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
} else
callerFrameGPR = GPRInfo::callFrameRegister;
- jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
- if (trueReturnPC)
- jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));
-
- jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
- if (!inlineCallFrame->isVarargs())
- jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
#if USE(JSVALUE64)
+ jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
+ if (!inlineCallFrame->isClosureCall)
+ jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
+ jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
+ jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
if (!inlineCallFrame->isClosureCall)
jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
+
+ // Leave the captured arguments in regT3.
+ if (baselineCodeBlock->usesArguments())
+ jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#else // USE(JSVALUE64) // so this is the 32-bit part
+ jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
+ jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
+ if (!inlineCallFrame->isClosureCall)
+ jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
+ jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
+ jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
if (!inlineCallFrame->isClosureCall)
jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
+
+ // Leave the captured arguments in regT3.
+ if (baselineCodeBlock->usesArguments())
+ jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
+
+ if (baselineCodeBlock->usesArguments()) {
+ AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
+ jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
+ jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
+ noArguments.link(&jit);
+ }
}
#if USE(JSVALUE64)
@@ -225,43 +159,8 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
-#if ENABLE(GGC)
-static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
-{
- AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner);
-
- // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
-#if CPU(X86)
- jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
-#endif
-
- jit.setupArgumentsWithExecState(owner);
- jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
- jit.call(scratch);
-
-#if CPU(X86)
- jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
-#endif
-
- ownerIsRememberedOrInEden.link(&jit);
-}
-#endif // ENABLE(GGC)
-
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
-#if ENABLE(GGC)
- jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
- osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
- InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
- if (inlineCallFrames) {
- for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
- ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
- jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
- osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
- }
- }
-#endif
-
if (exit.m_codeOrigin.inlineCallFrame)
jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
@@ -274,11 +173,7 @@ void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
-
- jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
- jit.jitAssertTagsInPlace();
-
jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
jit.jump(GPRInfo::regT2);
}
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h
index ce1836fa1..8ceb8b6d4 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGOSRExitCompilerCommon_h
#define DFGOSRExitCompilerCommon_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "CCallHelpers.h"
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp
deleted file mode 100644
index 570a6a02b..000000000
--- a/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGOSRExitFuzz.h"
-
-#include "TestRunnerUtils.h"
-
-namespace JSC { namespace DFG {
-
-unsigned g_numberOfStaticOSRExitFuzzChecks;
-unsigned g_numberOfOSRExitFuzzChecks;
-
-} // namespace DFG
-
-unsigned numberOfStaticOSRExitFuzzChecks()
-{
- return DFG::g_numberOfStaticOSRExitFuzzChecks;
-}
-
-unsigned numberOfOSRExitFuzzChecks()
-{
- return DFG::g_numberOfOSRExitFuzzChecks;
-}
-
-} // namespace JSC
-
-
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h b/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h
deleted file mode 100644
index 2feee5902..000000000
--- a/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGOSRExitFuzz_h
-#define DFGOSRExitFuzz_h
-
-#include "Options.h"
-
-namespace JSC { namespace DFG {
-
-extern unsigned g_numberOfStaticOSRExitFuzzChecks;
-
-inline bool doOSRExitFuzzing()
-{
- if (!Options::enableOSRExitFuzz())
- return false;
-
- g_numberOfStaticOSRExitFuzzChecks++;
- if (unsigned atStatic = Options::fireOSRExitFuzzAtStatic())
- return atStatic == g_numberOfStaticOSRExitFuzzChecks;
-
- return true;
-}
-
-// DFG- and FTL-generated code will query this on every speculation.
-extern unsigned g_numberOfOSRExitFuzzChecks;
-
-} } // namespace JSC::DFG
-
-#endif // DFGOSRExitFuzz_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp
index 59780544d..fec99ec9a 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp
@@ -30,7 +30,6 @@
#include "DFGJITCompiler.h"
#include "DFGSpeculativeJIT.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h b/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h
index 57cf7834a..4e016a406 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h
@@ -26,6 +26,8 @@
#ifndef DFGOSRExitJumpPlaceholder_h
#define DFGOSRExitJumpPlaceholder_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp
index 51d6e5a0d..98e58a101 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +32,7 @@
#include "Executable.h"
#include "JIT.h"
#include "JITCode.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -45,7 +45,7 @@ void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin)
FunctionExecutable* executable =
static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get());
CodeBlock* codeBlock = executable->baselineCodeBlockFor(
- codeOrigin.inlineCallFrame->specializationKind());
+ codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
if (codeBlock->jitType() == JSC::JITCode::BaselineJIT)
continue;
diff --git a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp b/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
deleted file mode 100644
index fd88910e5..000000000
--- a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
+++ /dev/null
@@ -1,2136 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGObjectAllocationSinkingPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBlockMapInlines.h"
-#include "DFGCombinedLiveness.h"
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGLazyNode.h"
-#include "DFGLivenessAnalysisPhase.h"
-#include "DFGOSRAvailabilityAnalysisPhase.h"
-#include "DFGPhase.h"
-#include "DFGPromotedHeapLocation.h"
-#include "DFGSSACalculator.h"
-#include "DFGValidate.h"
-#include "JSCInlines.h"
-
-#include <list>
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-bool verbose = false;
-
-// In order to sink object cycles, we use a points-to analysis coupled
-// with an escape analysis. This analysis is actually similar to an
-// abstract interpreter focused on local allocations and ignoring
-// everything else.
-//
-// We represent the local heap using two mappings:
-//
-// - A set of the local allocations present in the function, where
-// each of those have a further mapping from
-// PromotedLocationDescriptor to local allocations they must point
-// to.
-//
-// - A "pointer" mapping from nodes to local allocations, if they must
-// be equal to said local allocation and are currently live. This
-// can be because the node is the actual node that created the
-// allocation, or any other node that must currently point to it -
-// we don't make a difference.
-//
-// The following graph is a motivation for why we separate allocations
-// from pointers:
-//
-// Block #0
-// 0: NewObject({})
-// 1: NewObject({})
-// -: PutByOffset(@0, @1, x)
-// -: PutStructure(@0, {x:0})
-// 2: GetByOffset(@0, x)
-// -: Jump(#1)
-//
-// Block #1
-// -: Return(@2)
-//
-// Here, we need to remember in block #1 that @2 points to a local
-// allocation with appropriate fields and structures information
-// (because we should be able to place a materialization on top of
-// block #1 here), even though @1 is dead. We *could* just keep @1
-// artificially alive here, but there is no real reason to do it:
-// after all, by the end of block #0, @1 and @2 should be completely
-// interchangeable, and there is no reason for us to artificially make
-// @1 more important.
-//
-// An important point to consider to understand this separation is
-// that we should think of the local heap as follow: we have a
-// bunch of nodes that are pointers to "allocations" that live
-// someplace on the heap, and those allocations can have pointers in
-// between themselves as well. We shouldn't care about whatever
-// names we give to the allocations ; what matters when
-// comparing/merging two heaps is the isomorphism/comparison between
-// the allocation graphs as seen by the nodes.
-//
-// For instance, in the following graph:
-//
-// Block #0
-// 0: NewObject({})
-// -: Branch(#1, #2)
-//
-// Block #1
-// 1: NewObject({})
-// -: PutByOffset(@0, @1, x)
-// -: PutStructure(@0, {x:0})
-// -: Jump(#3)
-//
-// Block #2
-// 2: NewObject({})
-// -: PutByOffset(@2, undefined, x)
-// -: PutStructure(@2, {x:0})
-// -: PutByOffset(@0, @2, x)
-// -: PutStructure(@0, {x:0})
-// -: Jump(#3)
-//
-// Block #3
-// -: Return(@0)
-//
-// we should think of the heaps at tail of blocks #1 and #2 as being
-// exactly the same, even though one has @0.x pointing to @1 and the
-// other has @0.x pointing to @2, because in essence this should not
-// be different from the graph where we hoisted @1 and @2 into a
-// single allocation in block #0. We currently will not handle this
-// case, because we merge allocations based on the node they are
-// coming from, but this is only a technicality for the sake of
-// simplicity that shouldn't hide the deeper idea outlined here.
-
-class Allocation {
-public:
- // We use Escaped as a special allocation kind because when we
- // decide to sink an allocation, we still need to keep track of it
- // once it is escaped if it still has pointers to it in order to
- // replace any use of those pointers by the corresponding
- // materialization
- enum class Kind { Escaped, Object, Activation, Function };
-
- explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
- : m_identifier(identifier)
- , m_kind(kind)
- {
- }
-
-
- const HashMap<PromotedLocationDescriptor, Node*>& fields() const
- {
- return m_fields;
- }
-
- Node* get(PromotedLocationDescriptor descriptor)
- {
- return m_fields.get(descriptor);
- }
-
- Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
- {
- // Pointing to anything else than an unescaped local
- // allocation is represented by simply not having the
- // field
- if (value)
- m_fields.set(descriptor, value);
- else
- m_fields.remove(descriptor);
- return *this;
- }
-
- void remove(PromotedLocationDescriptor descriptor)
- {
- set(descriptor, nullptr);
- }
-
- bool hasStructures() const
- {
- switch (kind()) {
- case Kind::Object:
- return true;
-
- default:
- return false;
- }
- }
-
- Allocation& setStructures(const StructureSet& structures)
- {
- ASSERT(hasStructures() && !structures.isEmpty());
- m_structures = structures;
- return *this;
- }
-
- Allocation& mergeStructures(const StructureSet& structures)
- {
- ASSERT(hasStructures() || structures.isEmpty());
- m_structures.merge(structures);
- return *this;
- }
-
- Allocation& filterStructures(const StructureSet& structures)
- {
- ASSERT(hasStructures());
- m_structures.filter(structures);
- return *this;
- }
-
- const StructureSet& structures() const
- {
- return m_structures;
- }
-
- Node* identifier() const { return m_identifier; }
-
- Kind kind() const { return m_kind; }
-
- bool isEscapedAllocation() const
- {
- return kind() == Kind::Escaped;
- }
-
- bool isObjectAllocation() const
- {
- return m_kind == Kind::Object;
- }
-
- bool isActivationAllocation() const
- {
- return m_kind == Kind::Activation;
- }
-
- bool isFunctionAllocation() const
- {
- return m_kind == Kind::Function;
- }
-
- bool operator==(const Allocation& other) const
- {
- return m_identifier == other.m_identifier
- && m_kind == other.m_kind
- && m_fields == other.m_fields
- && m_structures == other.m_structures;
- }
-
- bool operator!=(const Allocation& other) const
- {
- return !(*this == other);
- }
-
- void dump(PrintStream& out) const
- {
- dumpInContext(out, nullptr);
- }
-
- void dumpInContext(PrintStream& out, DumpContext* context) const
- {
- switch (m_kind) {
- case Kind::Escaped:
- out.print("Escaped");
- break;
-
- case Kind::Object:
- out.print("Object");
- break;
-
- case Kind::Function:
- out.print("Function");
- break;
-
- case Kind::Activation:
- out.print("Activation");
- break;
- }
- out.print("Allocation(");
- if (!m_structures.isEmpty())
- out.print(inContext(m_structures, context));
- if (!m_fields.isEmpty()) {
- if (!m_structures.isEmpty())
- out.print(", ");
- out.print(mapDump(m_fields, " => #", ", "));
- }
- out.print(")");
- }
-
-private:
- Node* m_identifier; // This is the actual node that created the allocation
- Kind m_kind;
- HashMap<PromotedLocationDescriptor, Node*> m_fields;
- StructureSet m_structures;
-};
-
-class LocalHeap {
-public:
- Allocation& newAllocation(Node* node, Allocation::Kind kind)
- {
- ASSERT(!m_pointers.contains(node) && !isAllocation(node));
- m_pointers.add(node, node);
- return m_allocations.set(node, Allocation(node, kind)).iterator->value;
- }
-
- bool isAllocation(Node* identifier) const
- {
- return m_allocations.contains(identifier);
- }
-
- // Note that this is fundamentally different from
- // onlyLocalAllocation() below. getAllocation() takes as argument
- // a node-as-identifier, that is, an allocation node. This
- // allocation node doesn't have to be alive; it may only be
- // pointed to by other nodes or allocation fields.
- // For instance, in the following graph:
- //
- // Block #0
- // 0: NewObject({})
- // 1: NewObject({})
- // -: PutByOffset(@0, @1, x)
- // -: PutStructure(@0, {x:0})
- // 2: GetByOffset(@0, x)
- // -: Jump(#1)
- //
- // Block #1
- // -: Return(@2)
- //
- // At head of block #1, the only reachable allocation is #@1,
- // which can be reached through node @2. Thus, getAllocation(#@1)
- // contains the appropriate metadata for this allocation, but
- // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
- // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
- // is the same as getAllocation(#@1), while getAllocation(#@2)
- // does not make sense since @2 is not an allocation node.
- //
- // This is meant to be used when the node is already known to be
- // an identifier (i.e. an allocation) - probably because it was
- // found as value of a field or pointer in the current heap, or
- // was the result of a call to follow(). In any other cases (such
- // as when doing anything while traversing the graph), the
- // appropriate function to call is probably onlyLocalAllocation.
- Allocation& getAllocation(Node* identifier)
- {
- auto iter = m_allocations.find(identifier);
- ASSERT(iter != m_allocations.end());
- return iter->value;
- }
-
- void newPointer(Node* node, Node* identifier)
- {
- ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
- ASSERT(isAllocation(identifier));
- m_pointers.add(node, identifier);
- }
-
- // follow solves the points-to problem. Given a live node, which
- // may be either an allocation itself or a heap read (e.g. a
- // GetByOffset node), it returns the corresponding allocation
- // node, if there is one. If the argument node is neither an
- // allocation or a heap read, or may point to different nodes,
- // nullptr will be returned. Note that a node that points to
- // different nodes can never point to an unescaped local
- // allocation.
- Node* follow(Node* node) const
- {
- auto iter = m_pointers.find(node);
- ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
- return iter == m_pointers.end() ? nullptr : iter->value;
- }
-
- Node* follow(PromotedHeapLocation location) const
- {
- const Allocation& base = m_allocations.find(location.base())->value;
- auto iter = base.fields().find(location.descriptor());
-
- if (iter == base.fields().end())
- return nullptr;
-
- return iter->value;
- }
-
- // onlyLocalAllocation find the corresponding allocation metadata
- // for any live node. onlyLocalAllocation(node) is essentially
- // getAllocation(follow(node)), with appropriate null handling.
- Allocation* onlyLocalAllocation(Node* node)
- {
- Node* identifier = follow(node);
- if (!identifier)
- return nullptr;
-
- return &getAllocation(identifier);
- }
-
- Allocation* onlyLocalAllocation(PromotedHeapLocation location)
- {
- Node* identifier = follow(location);
- if (!identifier)
- return nullptr;
-
- return &getAllocation(identifier);
- }
-
- // This allows us to store the escapees only when necessary. If
- // set, the current escapees can be retrieved at any time using
- // takeEscapees(), which will clear the cached set of escapees;
- // otherwise the heap won't remember escaping allocations.
- void setWantEscapees()
- {
- m_wantEscapees = true;
- }
-
- HashMap<Node*, Allocation> takeEscapees()
- {
- return WTF::move(m_escapees);
- }
-
- void escape(Node* node)
- {
- Node* identifier = follow(node);
- if (!identifier)
- return;
-
- escapeAllocation(identifier);
- }
-
- void merge(const LocalHeap& other)
- {
- assertIsValid();
- other.assertIsValid();
- ASSERT(!m_wantEscapees);
-
- if (!reached()) {
- ASSERT(other.reached());
- *this = other;
- return;
- }
-
- HashSet<Node*> toEscape;
-
- for (auto& allocationEntry : other.m_allocations)
- m_allocations.add(allocationEntry.key, allocationEntry.value);
- for (auto& allocationEntry : m_allocations) {
- auto allocationIter = other.m_allocations.find(allocationEntry.key);
-
- // If we have it and they don't, it died for them but we
- // are keeping it alive from another field somewhere.
- // There is nothing to do - we will be escaped
- // automatically when we handle that other field.
- // This will also happen for allocation that we have and
- // they don't, and all of those will get pruned.
- if (allocationIter == other.m_allocations.end())
- continue;
-
- if (allocationEntry.value.kind() != allocationIter->value.kind()) {
- toEscape.add(allocationEntry.key);
- for (const auto& fieldEntry : allocationIter->value.fields())
- toEscape.add(fieldEntry.value);
- } else {
- mergePointerSets(
- allocationEntry.value.fields(), allocationIter->value.fields(),
- [&] (Node* identifier) {
- toEscape.add(identifier);
- },
- [&] (PromotedLocationDescriptor field) {
- allocationEntry.value.remove(field);
- });
- allocationEntry.value.mergeStructures(allocationIter->value.structures());
- }
- }
-
- mergePointerSets(m_pointers, other.m_pointers,
- [&] (Node* identifier) {
- toEscape.add(identifier);
- },
- [&] (Node* field) {
- m_pointers.remove(field);
- });
-
- for (Node* identifier : toEscape)
- escapeAllocation(identifier);
-
- if (!ASSERT_DISABLED) {
- for (const auto& entry : m_allocations)
- ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
- }
-
- // If there is no remaining pointer to an allocation, we can
- // remove it. This should only happen for escaped allocations,
- // because we only merge liveness-pruned heaps in the first
- // place.
- prune();
-
- assertIsValid();
- }
-
- void pruneByLiveness(const HashSet<Node*>& live)
- {
- Vector<Node*> toRemove;
- for (const auto& entry : m_pointers) {
- if (!live.contains(entry.key))
- toRemove.append(entry.key);
- }
- for (Node* node : toRemove)
- m_pointers.remove(node);
-
- prune();
- }
-
- void assertIsValid() const
- {
- if (ASSERT_DISABLED)
- return;
-
- // Pointers should point to an actual allocation
- for (const auto& entry : m_pointers) {
- ASSERT_UNUSED(entry, entry.value);
- ASSERT(m_allocations.contains(entry.value));
- }
-
- for (const auto& allocationEntry : m_allocations) {
- // Fields should point to an actual allocation
- for (const auto& fieldEntry : allocationEntry.value.fields()) {
- ASSERT_UNUSED(fieldEntry, fieldEntry.value);
- ASSERT(m_allocations.contains(fieldEntry.value));
- }
- }
- }
-
- bool operator==(const LocalHeap& other) const
- {
- assertIsValid();
- other.assertIsValid();
- return m_allocations == other.m_allocations
- && m_pointers == other.m_pointers;
- }
-
- bool operator!=(const LocalHeap& other) const
- {
- return !(*this == other);
- }
-
- const HashMap<Node*, Allocation>& allocations() const
- {
- return m_allocations;
- }
-
- const HashMap<Node*, Node*>& pointers() const
- {
- return m_pointers;
- }
-
- void dump(PrintStream& out) const
- {
- out.print(" Allocations:\n");
- for (const auto& entry : m_allocations)
- out.print(" #", entry.key, ": ", entry.value, "\n");
- out.print(" Pointers:\n");
- for (const auto& entry : m_pointers)
- out.print(" ", entry.key, " => #", entry.value, "\n");
- }
-
- bool reached() const
- {
- return m_reached;
- }
-
- void setReached()
- {
- m_reached = true;
- }
-
-private:
- // When we merge two heaps, we escape all fields of allocations,
- // unless they point to the same thing in both heaps.
- // The reason for this is that it allows us not to do extra work
- // for diamond graphs where we would otherwise have to check
- // whether we have a single definition or not, which would be
- // cumbersome.
- //
- // Note that we should try to unify nodes even when they are not
- // from the same allocation; for instance we should be able to
- // completely eliminate all allocations from the following graph:
- //
- // Block #0
- // 0: NewObject({})
- // -: Branch(#1, #2)
- //
- // Block #1
- // 1: NewObject({})
- // -: PutByOffset(@1, "left", val)
- // -: PutStructure(@1, {val:0})
- // -: PutByOffset(@0, @1, x)
- // -: PutStructure(@0, {x:0})
- // -: Jump(#3)
- //
- // Block #2
- // 2: NewObject({})
- // -: PutByOffset(@2, "right", val)
- // -: PutStructure(@2, {val:0})
- // -: PutByOffset(@0, @2, x)
- // -: PutStructure(@0, {x:0})
- // -: Jump(#3)
- //
- // Block #3:
- // 3: GetByOffset(@0, x)
- // 4: GetByOffset(@3, val)
- // -: Return(@4)
- template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
- void mergePointerSets(
- const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
- const EscapeFunctor& escape, const RemoveFunctor& remove)
- {
- Vector<Key> toRemove;
- for (const auto& entry : my) {
- auto iter = their.find(entry.key);
- if (iter == their.end()) {
- toRemove.append(entry.key);
- escape(entry.value);
- } else if (iter->value != entry.value) {
- toRemove.append(entry.key);
- escape(entry.value);
- escape(iter->value);
- }
- }
- for (const auto& entry : their) {
- if (my.contains(entry.key))
- continue;
- escape(entry.value);
- }
- for (Key key : toRemove)
- remove(key);
- }
-
- void escapeAllocation(Node* identifier)
- {
- Allocation& allocation = getAllocation(identifier);
- if (allocation.isEscapedAllocation())
- return;
-
- Allocation unescaped = WTF::move(allocation);
- allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);
-
- for (const auto& entry : unescaped.fields())
- escapeAllocation(entry.value);
-
- if (m_wantEscapees)
- m_escapees.add(unescaped.identifier(), WTF::move(unescaped));
- }
-
- void prune()
- {
- HashSet<Node*> reachable;
- for (const auto& entry : m_pointers)
- reachable.add(entry.value);
-
- // Repeatedly mark as reachable allocations in fields of other
- // reachable allocations
- {
- Vector<Node*> worklist;
- worklist.appendRange(reachable.begin(), reachable.end());
-
- while (!worklist.isEmpty()) {
- Node* identifier = worklist.takeLast();
- Allocation& allocation = m_allocations.find(identifier)->value;
- for (const auto& entry : allocation.fields()) {
- if (reachable.add(entry.value).isNewEntry)
- worklist.append(entry.value);
- }
- }
- }
-
- // Remove unreachable allocations
- {
- Vector<Node*> toRemove;
- for (const auto& entry : m_allocations) {
- if (!reachable.contains(entry.key))
- toRemove.append(entry.key);
- }
- for (Node* identifier : toRemove)
- m_allocations.remove(identifier);
- }
- }
-
- bool m_reached = false;
- HashMap<Node*, Node*> m_pointers;
- HashMap<Node*, Allocation> m_allocations;
-
- bool m_wantEscapees = false;
- HashMap<Node*, Allocation> m_escapees;
-};
-
-class ObjectAllocationSinkingPhase : public Phase {
-public:
- ObjectAllocationSinkingPhase(Graph& graph)
- : Phase(graph, "object allocation elimination")
- , m_pointerSSA(graph)
- , m_allocationSSA(graph)
- , m_insertionSet(graph)
- {
- }
-
- bool run()
- {
- ASSERT(m_graph.m_form == SSA);
- ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
-
- if (!performSinking())
- return false;
-
- if (verbose) {
- dataLog("Graph after elimination:\n");
- m_graph.dump();
- }
-
- return true;
- }
-
-private:
- bool performSinking()
- {
- m_graph.computeRefCounts();
- m_graph.initializeNodeOwners();
- performLivenessAnalysis(m_graph);
- performOSRAvailabilityAnalysis(m_graph);
- m_combinedLiveness = CombinedLiveness(m_graph);
-
- CString graphBeforeSinking;
- if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
- StringPrintStream out;
- m_graph.dump(out);
- graphBeforeSinking = out.toCString();
- }
-
- if (verbose) {
- dataLog("Graph before elimination:\n");
- m_graph.dump();
- }
-
- performAnalysis();
-
- if (!determineSinkCandidates())
- return false;
-
- if (verbose) {
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
- dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
- }
- }
-
- promoteLocalHeap();
-
- if (Options::validateGraphAtEachPhase())
- validate(m_graph, DumpGraph, graphBeforeSinking);
- return true;
- }
-
- void performAnalysis()
- {
- m_heapAtHead = BlockMap<LocalHeap>(m_graph);
- m_heapAtTail = BlockMap<LocalHeap>(m_graph);
-
- bool changed;
- do {
- if (verbose)
- dataLog("Doing iteration of escape analysis.\n");
- changed = false;
-
- for (BasicBlock* block : m_graph.blocksInPreOrder()) {
- m_heapAtHead[block].setReached();
- m_heap = m_heapAtHead[block];
-
- for (Node* node : *block) {
- handleNode(
- node,
- [] (PromotedHeapLocation, LazyNode) { },
- [&] (PromotedHeapLocation) -> Node* {
- return nullptr;
- });
- }
-
- if (m_heap == m_heapAtTail[block])
- continue;
-
- m_heapAtTail[block] = m_heap;
- changed = true;
-
- m_heap.assertIsValid();
-
- // We keep only pointers that are live, and only
- // allocations that are either live, pointed to by a
- // live pointer, or (recursively) stored in a field of
- // a live allocation.
- //
- // This means we can accidentaly leak non-dominating
- // nodes into the successor. However, due to the
- // non-dominance property, we are guaranteed that the
- // successor has at least one predecessor that is not
- // dominated either: this means any reference to a
- // non-dominating allocation in the successor will
- // trigger an escape and get pruned during the merge.
- m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
-
- for (BasicBlock* successorBlock : block->successors())
- m_heapAtHead[successorBlock].merge(m_heap);
- }
- } while (changed);
- }
-
- template<typename WriteFunctor, typename ResolveFunctor>
- void handleNode(
- Node* node,
- const WriteFunctor& heapWrite,
- const ResolveFunctor& heapResolve)
- {
- m_heap.assertIsValid();
- ASSERT(m_heap.takeEscapees().isEmpty());
-
- Allocation* target = nullptr;
- HashMap<PromotedLocationDescriptor, LazyNode> writes;
- PromotedLocationDescriptor exactRead;
-
- switch (node->op()) {
- case NewObject:
- target = &m_heap.newAllocation(node, Allocation::Kind::Object);
- target->setStructures(node->structure());
- writes.add(
- StructurePLoc, LazyNode(m_graph.freeze(node->structure())));
- break;
-
- case MaterializeNewObject: {
- target = &m_heap.newAllocation(node, Allocation::Kind::Object);
- target->setStructures(node->structureSet());
- writes.add(
- StructurePLoc, LazyNode(m_graph.varArgChild(node, 0).node()));
- for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
- writes.add(
- PromotedLocationDescriptor(
- NamedPropertyPLoc,
- node->objectMaterializationData().m_properties[i].m_identifierNumber),
- LazyNode(m_graph.varArgChild(node, i + 1).node()));
- }
- break;
- }
-
- case NewFunction: {
- if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid()) {
- m_heap.escape(node->child1().node());
- break;
- }
- target = &m_heap.newAllocation(node, Allocation::Kind::Function);
- writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
- writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
- break;
- }
-
- case CreateActivation: {
- if (node->castOperand<SymbolTable*>()->singletonScope()->isStillValid()) {
- m_heap.escape(node->child1().node());
- break;
- }
- target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
- writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
- writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
- {
- SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
- ConcurrentJITLocker locker(symbolTable->m_lock);
- LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
- for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
- writes.add(
- PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
- initialValue);
- }
- }
- break;
- }
-
- case MaterializeCreateActivation: {
- // We have sunk this once already - there is no way the
- // watchpoint is still valid.
- ASSERT(!node->castOperand<SymbolTable*>()->singletonScope()->isStillValid());
- target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
- writes.add(ActivationSymbolTablePLoc, LazyNode(m_graph.varArgChild(node, 0).node()));
- writes.add(ActivationScopePLoc, LazyNode(m_graph.varArgChild(node, 1).node()));
- for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
- writes.add(
- PromotedLocationDescriptor(
- ClosureVarPLoc,
- node->objectMaterializationData().m_properties[i].m_identifierNumber),
- LazyNode(m_graph.varArgChild(node, i + 2).node()));
- }
- break;
- }
-
- case PutStructure:
- target = m_heap.onlyLocalAllocation(node->child1().node());
- if (target && target->isObjectAllocation()) {
- writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next))));
- target->setStructures(node->transition()->next);
- } else
- m_heap.escape(node->child1().node());
- break;
-
- case CheckStructure: {
- Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
- if (allocation && allocation->isObjectAllocation()) {
- allocation->filterStructures(node->structureSet());
- if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
- node->convertToCheckStructureImmediate(value);
- } else
- m_heap.escape(node->child1().node());
- break;
- }
-
- case GetByOffset:
- case GetGetterSetterByOffset:
- target = m_heap.onlyLocalAllocation(node->child2().node());
- if (target && target->isObjectAllocation()) {
- unsigned identifierNumber = node->storageAccessData().identifierNumber;
- exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
- } else {
- m_heap.escape(node->child1().node());
- m_heap.escape(node->child2().node());
- }
- break;
-
- case MultiGetByOffset:
- target = m_heap.onlyLocalAllocation(node->child1().node());
- if (target && target->isObjectAllocation()) {
- unsigned identifierNumber = node->multiGetByOffsetData().identifierNumber;
- exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
- } else
- m_heap.escape(node->child1().node());
- break;
-
- case PutByOffset:
- target = m_heap.onlyLocalAllocation(node->child2().node());
- if (target && target->isObjectAllocation()) {
- unsigned identifierNumber = node->storageAccessData().identifierNumber;
- writes.add(
- PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
- LazyNode(node->child3().node()));
- } else {
- m_heap.escape(node->child1().node());
- m_heap.escape(node->child2().node());
- m_heap.escape(node->child3().node());
- }
- break;
-
- case GetClosureVar:
- target = m_heap.onlyLocalAllocation(node->child1().node());
- if (target && target->isActivationAllocation()) {
- exactRead =
- PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
- } else
- m_heap.escape(node->child1().node());
- break;
-
- case PutClosureVar:
- target = m_heap.onlyLocalAllocation(node->child1().node());
- if (target && target->isActivationAllocation()) {
- writes.add(
- PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
- LazyNode(node->child2().node()));
- } else {
- m_heap.escape(node->child1().node());
- m_heap.escape(node->child2().node());
- }
- break;
-
- case SkipScope:
- target = m_heap.onlyLocalAllocation(node->child1().node());
- if (target && target->isActivationAllocation())
- exactRead = ActivationScopePLoc;
- else
- m_heap.escape(node->child1().node());
- break;
-
- case GetExecutable:
- target = m_heap.onlyLocalAllocation(node->child1().node());
- if (target && target->isFunctionAllocation())
- exactRead = FunctionExecutablePLoc;
- else
- m_heap.escape(node->child1().node());
- break;
-
- case GetScope:
- target = m_heap.onlyLocalAllocation(node->child1().node());
- if (target && target->isFunctionAllocation())
- exactRead = FunctionActivationPLoc;
- else
- m_heap.escape(node->child1().node());
- break;
-
- case Check:
- m_graph.doToChildren(
- node,
- [&] (Edge edge) {
- if (edge.willNotHaveCheck())
- return;
-
- if (alreadyChecked(edge.useKind(), SpecObject))
- return;
-
- m_heap.escape(edge.node());
- });
- break;
-
- case MovHint:
- case PutHint:
- // Handled by OSR availability analysis
- break;
-
- default:
- m_graph.doToChildren(
- node,
- [&] (Edge edge) {
- m_heap.escape(edge.node());
- });
- break;
- }
-
- if (exactRead) {
- ASSERT(target);
- ASSERT(writes.isEmpty());
- if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
- ASSERT(!value->replacement());
- node->replaceWith(value);
- }
- Node* identifier = target->get(exactRead);
- if (identifier)
- m_heap.newPointer(node, identifier);
- }
-
- for (auto entry : writes) {
- ASSERT(target);
- if (entry.value.isNode())
- target->set(entry.key, m_heap.follow(entry.value.asNode()));
- else
- target->remove(entry.key);
- heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
- }
-
- m_heap.assertIsValid();
- }
-
- bool determineSinkCandidates()
- {
- m_sinkCandidates.clear();
- m_materializationToEscapee.clear();
- m_materializationSiteToMaterializations.clear();
- m_materializationSiteToRecoveries.clear();
-
- // Logically we wish to consider every allocation and sink
- // it. However, it is probably not profitable to sink an
- // allocation that will always escape. So, we only sink an
- // allocation if one of the following is true:
- //
- // 1) There exists a basic block with only backwards outgoing
- // edges (or no outgoing edges) in which the node wasn't
- // materialized. This is meant to catch
- // effectively-infinite loops in which we don't need to
- // have allocated the object.
- //
- // 2) There exists a basic block at the tail of which the node
- // is dead and not materialized.
- //
- // 3) The sum of execution counts of the materializations is
- // less than the sum of execution counts of the original
- // node.
- //
- // We currently implement only rule #2.
- // FIXME: Implement the two other rules.
- // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
- // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
- //
- // However, these rules allow for a sunk object to be put into
- // a non-sunk one, which we don't support. We could solve this
- // by supporting PutHints on local allocations, making these
- // objects only partially correct, and we would need to adapt
- // the OSR availability analysis and OSR exit to handle
- // this. This would be totally doable, but would create a
- // super rare, and thus bug-prone, code path.
- // So, instead, we need to implement one of the following
- // closure rules:
- //
- // 1) If we put a sink candidate into a local allocation that
- // is not a sink candidate, change our minds and don't
- // actually sink the sink candidate.
- //
- // 2) If we put a sink candidate into a local allocation, that
- // allocation becomes a sink candidate as well.
- //
- // We currently choose to implement closure rule #2.
- HashMap<Node*, Vector<Node*>> dependencies;
- bool hasUnescapedReads = false;
- for (BasicBlock* block : m_graph.blocksInPreOrder()) {
- m_heap = m_heapAtHead[block];
-
- for (Node* node : *block) {
- handleNode(
- node,
- [&] (PromotedHeapLocation location, LazyNode value) {
- if (!value.isNode())
- return;
-
- Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
- if (allocation && !allocation->isEscapedAllocation())
- dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
- },
- [&] (PromotedHeapLocation) -> Node* {
- hasUnescapedReads = true;
- return nullptr;
- });
- }
-
- // The sink candidates are initially the unescaped
- // allocations dying at tail of blocks
- HashSet<Node*> allocations;
- for (const auto& entry : m_heap.allocations()) {
- if (!entry.value.isEscapedAllocation())
- allocations.add(entry.key);
- }
-
- m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
-
- for (Node* identifier : allocations) {
- if (!m_heap.isAllocation(identifier))
- m_sinkCandidates.add(identifier);
- }
- }
-
- // Ensure that the set of sink candidates is closed for put operations
- Vector<Node*> worklist;
- worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());
-
- while (!worklist.isEmpty()) {
- for (Node* identifier : dependencies.get(worklist.takeLast())) {
- if (m_sinkCandidates.add(identifier).isNewEntry)
- worklist.append(identifier);
- }
- }
-
- if (m_sinkCandidates.isEmpty())
- return hasUnescapedReads;
-
- if (verbose)
- dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");
-
- // Create the materialization nodes
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- m_heap = m_heapAtHead[block];
- m_heap.setWantEscapees();
-
- for (Node* node : *block) {
- handleNode(
- node,
- [] (PromotedHeapLocation, LazyNode) { },
- [] (PromotedHeapLocation) -> Node* {
- return nullptr;
- });
- auto escapees = m_heap.takeEscapees();
- if (!escapees.isEmpty())
- placeMaterializations(escapees, node);
- }
-
- m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
-
- {
- HashMap<Node*, Allocation> escapingOnEdge;
- for (const auto& entry : m_heap.allocations()) {
- if (entry.value.isEscapedAllocation())
- continue;
-
- bool mustEscape = false;
- for (BasicBlock* successorBlock : block->successors()) {
- if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
- || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
- mustEscape = true;
- }
-
- if (mustEscape)
- escapingOnEdge.add(entry.key, entry.value);
- }
- placeMaterializations(WTF::move(escapingOnEdge), block->terminal());
- }
- }
-
- return hasUnescapedReads || !m_sinkCandidates.isEmpty();
- }
-
- void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
- {
- // We don't create materializations if the escapee is not a
- // sink candidate
- Vector<Node*> toRemove;
- for (const auto& entry : escapees) {
- if (!m_sinkCandidates.contains(entry.key))
- toRemove.append(entry.key);
- }
- for (Node* identifier : toRemove)
- escapees.remove(identifier);
-
- if (escapees.isEmpty())
- return;
-
- // First collect the hints that will be needed when the node
- // we materialize is still stored into other unescaped sink candidates
- Vector<PromotedHeapLocation> hints;
- for (const auto& entry : m_heap.allocations()) {
- if (escapees.contains(entry.key))
- continue;
-
- for (const auto& field : entry.value.fields()) {
- ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
- if (escapees.contains(field.value) && !field.key.neededForMaterialization())
- hints.append(PromotedHeapLocation(entry.key, field.key));
- }
- }
-
- // Now we need to order the materialization. Any order is
- // valid (as long as we materialize a node first if it is
- // needed for the materialization of another node, e.g. a
- // function's activation must be materialized before the
- // function itself), but we want to try minimizing the number
- // of times we have to place Puts to close cycles after a
- // materialization. In other words, we are trying to find the
- // minimum number of materializations to remove from the
- // materialization graph to make it a DAG, known as the
- // (vertex) feedback set problem. Unfortunately, this is a
- // NP-hard problem, which we don't want to solve exactly.
- //
- // Instead, we use a simple greedy procedure, that procedes as
- // follow:
- // - While there is at least one node with no outgoing edge
- // amongst the remaining materializations, materialize it
- // first
- //
- // - Similarily, while there is at least one node with no
- // incoming edge amongst the remaining materializations,
- // materialize it last.
- //
- // - When both previous conditions are false, we have an
- // actual cycle, and we need to pick a node to
- // materialize. We try greedily to remove the "pressure" on
- // the remaining nodes by choosing the node with maximum
- // |incoming edges| * |outgoing edges| as a measure of how
- // "central" to the graph it is. We materialize it first,
- // so that all the recoveries will be Puts of things into
- // it (rather than Puts of the materialization into other
- // objects), which means we will have a single
- // StoreBarrier.
-
-
- // Compute dependencies between materializations
- HashMap<Node*, HashSet<Node*>> dependencies;
- HashMap<Node*, HashSet<Node*>> reverseDependencies;
- HashMap<Node*, HashSet<Node*>> forMaterialization;
- for (const auto& entry : escapees) {
- auto& myDependencies = dependencies.add(entry.key, HashSet<Node*>()).iterator->value;
- auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, HashSet<Node*>()).iterator->value;
- reverseDependencies.add(entry.key, HashSet<Node*>());
- for (const auto& field : entry.value.fields()) {
- if (escapees.contains(field.value) && field.value != entry.key) {
- myDependencies.add(field.value);
- reverseDependencies.add(field.value, HashSet<Node*>()).iterator->value.add(entry.key);
- if (field.key.neededForMaterialization())
- myDependenciesForMaterialization.add(field.value);
- }
- }
- }
-
- // Helper function to update the materialized set and the
- // dependencies
- HashSet<Node*> materialized;
- auto materialize = [&] (Node* identifier) {
- materialized.add(identifier);
- for (Node* dep : dependencies.get(identifier))
- reverseDependencies.find(dep)->value.remove(identifier);
- for (Node* rdep : reverseDependencies.get(identifier)) {
- dependencies.find(rdep)->value.remove(identifier);
- forMaterialization.find(rdep)->value.remove(identifier);
- }
- dependencies.remove(identifier);
- reverseDependencies.remove(identifier);
- forMaterialization.remove(identifier);
- };
-
- // Nodes without remaining unmaterialized fields will be
- // materialized first - amongst the remaining unmaterialized
- // nodes
- std::list<Allocation> toMaterialize;
- auto firstPos = toMaterialize.begin();
- auto materializeFirst = [&] (Allocation&& allocation) {
- materialize(allocation.identifier());
- // We need to insert *after* the current position
- if (firstPos != toMaterialize.end())
- ++firstPos;
- firstPos = toMaterialize.insert(firstPos, WTF::move(allocation));
- };
-
- // Nodes that no other unmaterialized node points to will be
- // materialized last - amongst the remaining unmaterialized
- // nodes
- auto lastPos = toMaterialize.end();
- auto materializeLast = [&] (Allocation&& allocation) {
- materialize(allocation.identifier());
- lastPos = toMaterialize.insert(lastPos, WTF::move(allocation));
- };
-
- // These are the promoted locations that contains some of the
- // allocations we are currently escaping. If they are a location on
- // some other allocation we are currently materializing, we will need
- // to "recover" their value with a real put once the corresponding
- // allocation is materialized; if they are a location on some other
- // not-yet-materialized allocation, we will need a PutHint.
- Vector<PromotedHeapLocation> toRecover;
-
- // This loop does the actual cycle breaking
- while (!escapees.isEmpty()) {
- materialized.clear();
-
- // Materialize nodes that won't require recoveries if we can
- for (auto& entry : escapees) {
- if (!forMaterialization.find(entry.key)->value.isEmpty())
- continue;
-
- if (dependencies.find(entry.key)->value.isEmpty()) {
- materializeFirst(WTF::move(entry.value));
- continue;
- }
-
- if (reverseDependencies.find(entry.key)->value.isEmpty()) {
- materializeLast(WTF::move(entry.value));
- continue;
- }
- }
-
- // We reach this only if there is an actual cycle that needs
- // breaking. Because we do not want to solve a NP-hard problem
- // here, we just heuristically pick a node and materialize it
- // first.
- if (materialized.isEmpty()) {
- uint64_t maxEvaluation = 0;
- Allocation* bestAllocation;
- for (auto& entry : escapees) {
- if (!forMaterialization.find(entry.key)->value.isEmpty())
- continue;
-
- uint64_t evaluation =
- static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
- if (evaluation > maxEvaluation) {
- maxEvaluation = evaluation;
- bestAllocation = &entry.value;
- }
- }
- RELEASE_ASSERT(maxEvaluation > 0);
-
- materializeFirst(WTF::move(*bestAllocation));
- }
- RELEASE_ASSERT(!materialized.isEmpty());
-
- for (Node* identifier : materialized)
- escapees.remove(identifier);
- }
-
- materialized.clear();
-
- HashSet<Node*> escaped;
- for (const Allocation& allocation : toMaterialize)
- escaped.add(allocation.identifier());
- for (const Allocation& allocation : toMaterialize) {
- for (const auto& field : allocation.fields()) {
- if (escaped.contains(field.value) && !materialized.contains(field.value))
- toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
- }
- materialized.add(allocation.identifier());
- }
-
- Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
- where, Vector<Node*>()).iterator->value;
-
- for (const Allocation& allocation : toMaterialize) {
- Node* materialization = createMaterialization(allocation, where);
- materializations.append(materialization);
- m_materializationToEscapee.add(materialization, allocation.identifier());
- }
-
- if (!toRecover.isEmpty()) {
- m_materializationSiteToRecoveries.add(
- where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
- }
-
- // The hints need to be after the "real" recoveries so that we
- // don't hint not-yet-complete objects
- if (!hints.isEmpty()) {
- m_materializationSiteToRecoveries.add(
- where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(hints);
- }
- }
-
- Node* createMaterialization(const Allocation& allocation, Node* where)
- {
- // FIXME: This is the only place where we actually use the
- // fact that an allocation's identifier is indeed the node
- // that created the allocation.
- switch (allocation.kind()) {
- case Allocation::Kind::Object: {
- ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
- StructureSet* set = m_graph.addStructureSet(allocation.structures());
-
- return m_graph.addNode(
- allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
- NodeOrigin(
- allocation.identifier()->origin.semantic,
- where->origin.forExit),
- OpInfo(set), OpInfo(data), 0, 0);
- }
-
- case Allocation::Kind::Function: {
- FrozenValue* executable = allocation.identifier()->cellOperand();
-
- return m_graph.addNode(
- allocation.identifier()->prediction(), NewFunction,
- NodeOrigin(
- allocation.identifier()->origin.semantic,
- where->origin.forExit),
- OpInfo(executable));
- break;
- }
-
- case Allocation::Kind::Activation: {
- ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
- FrozenValue* symbolTable = allocation.identifier()->cellOperand();
-
- return m_graph.addNode(
- allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
- NodeOrigin(
- allocation.identifier()->origin.semantic,
- where->origin.forExit),
- OpInfo(symbolTable), OpInfo(data), 0, 0);
- }
-
- default:
- DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
- }
- }
-
- void promoteLocalHeap()
- {
- // Collect the set of heap locations that we will be operating
- // over.
- HashSet<PromotedHeapLocation> locations;
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- m_heap = m_heapAtHead[block];
-
- for (Node* node : *block) {
- handleNode(
- node,
- [&] (PromotedHeapLocation location, LazyNode) {
- // If the location is not on a sink candidate,
- // we only sink it if it is read
- if (m_sinkCandidates.contains(location.base()))
- locations.add(location);
- },
- [&] (PromotedHeapLocation location) -> Node* {
- locations.add(location);
- return nullptr;
- });
- }
- }
-
- // Figure out which locations belong to which allocations.
- m_locationsForAllocation.clear();
- for (PromotedHeapLocation location : locations) {
- auto result = m_locationsForAllocation.add(
- location.base(),
- Vector<PromotedHeapLocation>());
- ASSERT(!result.iterator->value.contains(location));
- result.iterator->value.append(location);
- }
-
- m_pointerSSA.reset();
- m_allocationSSA.reset();
-
- // Collect the set of "variables" that we will be sinking.
- m_locationToVariable.clear();
- m_nodeToVariable.clear();
- Vector<Node*> indexToNode;
- Vector<PromotedHeapLocation> indexToLocation;
-
- for (Node* index : m_sinkCandidates) {
- SSACalculator::Variable* variable = m_allocationSSA.newVariable();
- m_nodeToVariable.add(index, variable);
- ASSERT(indexToNode.size() == variable->index());
- indexToNode.append(index);
- }
-
- for (PromotedHeapLocation location : locations) {
- SSACalculator::Variable* variable = m_pointerSSA.newVariable();
- m_locationToVariable.add(location, variable);
- ASSERT(indexToLocation.size() == variable->index());
- indexToLocation.append(location);
- }
-
- // We insert all required constants at top of block 0 so that
- // they are inserted only once and we don't clutter the graph
- // with useless constants everywhere
- HashMap<FrozenValue*, Node*> lazyMapping;
- if (!m_bottom)
- m_bottom = m_insertionSet.insertConstant(0, NodeOrigin(), jsNumber(1927));
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- m_heap = m_heapAtHead[block];
-
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
-
- // Some named properties can be added conditionally,
- // and that would necessitate bottoms
- for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
- if (location.kind() != NamedPropertyPLoc)
- continue;
-
- SSACalculator::Variable* variable = m_locationToVariable.get(location);
- m_pointerSSA.newDef(variable, block, m_bottom);
- }
-
- for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
- Node* escapee = m_materializationToEscapee.get(materialization);
- m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
- }
-
- if (m_sinkCandidates.contains(node))
- m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);
-
- handleNode(
- node,
- [&] (PromotedHeapLocation location, LazyNode value) {
- if (!locations.contains(location))
- return;
-
- Node* nodeValue;
- if (value.isNode())
- nodeValue = value.asNode();
- else {
- auto iter = lazyMapping.find(value.asValue());
- if (iter != lazyMapping.end())
- nodeValue = iter->value;
- else {
- nodeValue = value.ensureIsNode(
- m_insertionSet, m_graph.block(0), 0);
- lazyMapping.add(value.asValue(), nodeValue);
- }
- }
-
- SSACalculator::Variable* variable = m_locationToVariable.get(location);
- m_pointerSSA.newDef(variable, block, nodeValue);
- },
- [] (PromotedHeapLocation) -> Node* {
- return nullptr;
- });
- }
- }
- m_insertionSet.execute(m_graph.block(0));
-
- // Run the SSA calculators to create Phis
- m_pointerSSA.computePhis(
- [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
- PromotedHeapLocation location = indexToLocation[variable->index()];
-
- // Don't create Phi nodes for fields of dead allocations
- if (!m_heapAtHead[block].isAllocation(location.base()))
- return nullptr;
-
- // Don't create Phi nodes once we are escaped
- if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
- return nullptr;
-
- // If we point to a single allocation, we will
- // directly use its materialization
- if (m_heapAtHead[block].follow(location))
- return nullptr;
-
- Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
- phiNode->mergeFlags(NodeResultJS);
- return phiNode;
- });
-
- m_allocationSSA.computePhis(
- [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
- Node* identifier = indexToNode[variable->index()];
-
- // Don't create Phi nodes for dead allocations
- if (!m_heapAtHead[block].isAllocation(identifier))
- return nullptr;
-
- // Don't create Phi nodes until we are escaped
- if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
- return nullptr;
-
- Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
- phiNode->mergeFlags(NodeResultJS);
- return phiNode;
- });
-
- // Place Phis in the right places, replace all uses of any load with the appropriate
- // value, and create the materialization nodes.
- LocalOSRAvailabilityCalculator availabilityCalculator;
- m_graph.clearReplacements();
- for (BasicBlock* block : m_graph.blocksInPreOrder()) {
- m_heap = m_heapAtHead[block];
- availabilityCalculator.beginBlock(block);
-
- // These mapping tables are intended to be lazy. If
- // something is omitted from the table, it means that
- // there haven't been any local stores to the promoted
- // heap location (or any local materialization).
- m_localMapping.clear();
- m_escapeeToMaterialization.clear();
-
- // Insert the Phi functions that we had previously
- // created.
- for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
- SSACalculator::Variable* variable = phiDef->variable();
- m_insertionSet.insert(0, phiDef->value());
-
- PromotedHeapLocation location = indexToLocation[variable->index()];
- m_localMapping.set(location, phiDef->value());
-
- if (m_sinkCandidates.contains(location.base())) {
- m_insertionSet.insert(
- 0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
- }
- }
-
- for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
- SSACalculator::Variable* variable = phiDef->variable();
- m_insertionSet.insert(0, phiDef->value());
-
- Node* identifier = indexToNode[variable->index()];
- m_escapeeToMaterialization.add(identifier, phiDef->value());
- insertOSRHintsForUpdate(0, NodeOrigin(), availabilityCalculator.m_availability, identifier, phiDef->value());
- }
-
- if (verbose) {
- dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
- dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
- }
-
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
- if (location.kind() != NamedPropertyPLoc)
- continue;
-
- m_localMapping.set(location, m_bottom);
-
- if (m_sinkCandidates.contains(node)) {
- m_insertionSet.insert(
- nodeIndex + 1,
- location.createHint(m_graph, node->origin, m_bottom));
- }
- }
-
- for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
- Node* escapee = m_materializationToEscapee.get(materialization);
- populateMaterialization(block, materialization, escapee);
- m_escapeeToMaterialization.set(escapee, materialization);
- m_insertionSet.insert(nodeIndex, materialization);
- if (verbose)
- dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
- }
-
- for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
- m_insertionSet.insert(nodeIndex, createRecovery(block, location, node));
-
- // We need to put the OSR hints after the recoveries,
- // because we only want the hints once the object is
- // complete
- for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
- Node* escapee = m_materializationToEscapee.get(materialization);
- insertOSRHintsForUpdate(
- nodeIndex, node->origin,
- availabilityCalculator.m_availability, escapee, materialization);
- }
-
- if (m_sinkCandidates.contains(node))
- m_escapeeToMaterialization.set(node, node);
-
- availabilityCalculator.executeNode(node);
-
- bool doLower = false;
- handleNode(
- node,
- [&] (PromotedHeapLocation location, LazyNode value) {
- if (!locations.contains(location))
- return;
-
- Node* nodeValue;
- if (value.isNode())
- nodeValue = value.asNode();
- else
- nodeValue = lazyMapping.get(value.asValue());
-
- nodeValue = resolve(block, nodeValue);
-
- m_localMapping.set(location, nodeValue);
-
- if (!m_sinkCandidates.contains(location.base()))
- return;
-
- doLower = true;
-
- m_insertionSet.insert(nodeIndex + 1,
- location.createHint(m_graph, node->origin, nodeValue));
- },
- [&] (PromotedHeapLocation location) -> Node* {
- return resolve(block, location);
- });
-
- if (m_sinkCandidates.contains(node) || doLower) {
- switch (node->op()) {
- case NewObject:
- case MaterializeNewObject:
- node->convertToPhantomNewObject();
- break;
-
- case NewFunction:
- node->convertToPhantomNewFunction();
- break;
-
- case CreateActivation:
- case MaterializeCreateActivation:
- node->convertToPhantomCreateActivation();
- break;
-
- default:
- node->remove();
- break;
- }
- }
-
- m_graph.doToChildren(
- node,
- [&] (Edge& edge) {
- edge.setNode(resolve(block, edge.node()));
- });
- }
-
- // Gotta drop some Upsilons.
- NodeAndIndex terminal = block->findTerminal();
- size_t upsilonInsertionPoint = terminal.index;
- NodeOrigin upsilonOrigin = terminal.node->origin;
- for (BasicBlock* successorBlock : block->successors()) {
- for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
- Node* phiNode = phiDef->value();
- SSACalculator::Variable* variable = phiDef->variable();
- PromotedHeapLocation location = indexToLocation[variable->index()];
- Node* incoming = resolve(block, location);
-
- m_insertionSet.insertNode(
- upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
- OpInfo(phiNode), incoming->defaultEdge());
- }
-
- for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
- Node* phiNode = phiDef->value();
- SSACalculator::Variable* variable = phiDef->variable();
- Node* incoming = getMaterialization(block, indexToNode[variable->index()]);
-
- m_insertionSet.insertNode(
- upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
- OpInfo(phiNode), incoming->defaultEdge());
- }
- }
-
- m_insertionSet.execute(block);
- }
- }
-
- Node* resolve(BasicBlock* block, PromotedHeapLocation location)
- {
- // If we are currently pointing to a single local allocation,
- // simply return the associated materialization.
- if (Node* identifier = m_heap.follow(location))
- return getMaterialization(block, identifier);
-
- if (Node* result = m_localMapping.get(location))
- return result;
-
- // This implies that there is no local mapping. Find a non-local mapping.
- SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
- block, m_locationToVariable.get(location));
- ASSERT(def);
- ASSERT(def->value());
-
- Node* result = def->value();
-
- ASSERT(!result->replacement());
-
- m_localMapping.add(location, result);
- return result;
- }
-
- Node* resolve(BasicBlock* block, Node* node)
- {
- // If we are currently pointing to a single local allocation,
- // simply return the associated materialization.
- if (Node* identifier = m_heap.follow(node))
- return getMaterialization(block, identifier);
-
- if (node->replacement())
- node = node->replacement();
- ASSERT(!node->replacement());
-
- return node;
- }
-
- Node* getMaterialization(BasicBlock* block, Node* identifier)
- {
- ASSERT(m_heap.isAllocation(identifier));
- if (!m_sinkCandidates.contains(identifier))
- return identifier;
-
- if (Node* materialization = m_escapeeToMaterialization.get(identifier))
- return materialization;
-
- SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
- block, m_nodeToVariable.get(identifier));
- ASSERT(def && def->value());
- m_escapeeToMaterialization.add(identifier, def->value());
- ASSERT(!def->value()->replacement());
- return def->value();
- }
-
- void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, AvailabilityMap& availability, Node* escapee, Node* materialization)
- {
- // We need to follow() the value in the heap.
- // Consider the following graph:
- //
- // Block #0
- // 0: NewObject({})
- // 1: NewObject({})
- // -: PutByOffset(@0, @1, x:0)
- // -: PutStructure(@0, {x:0})
- // 2: GetByOffset(@0, x:0)
- // -: MovHint(@2, loc1)
- // -: Branch(#1, #2)
- //
- // Block #1
- // 3: Call(f, @1)
- // 4: Return(@0)
- //
- // Block #2
- // -: Return(undefined)
- //
- // We need to materialize @1 at @3, and when doing so we need
- // to insert a MovHint for the materialization into loc1 as
- // well.
- // In order to do this, we say that we need to insert an
- // update hint for any availability whose node resolve()s to
- // the materialization.
- for (auto entry : availability.m_heap) {
- if (!entry.value.hasNode())
- continue;
- if (m_heap.follow(entry.value.node()) != escapee)
- continue;
-
- m_insertionSet.insert(
- nodeIndex, entry.key.createHint(m_graph, origin, materialization));
- }
-
- for (unsigned i = availability.m_locals.size(); i--;) {
- if (!availability.m_locals[i].hasNode())
- continue;
- if (m_heap.follow(availability.m_locals[i].node()) != escapee)
- continue;
-
- int operand = availability.m_locals.operandForIndex(i);
- m_insertionSet.insertNode(
- nodeIndex, SpecNone, MovHint, origin, OpInfo(operand),
- materialization->defaultEdge());
- }
- }
-
- void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
- {
- Allocation& allocation = m_heap.getAllocation(escapee);
- switch (node->op()) {
- case MaterializeNewObject: {
- ObjectMaterializationData& data = node->objectMaterializationData();
- unsigned firstChild = m_graph.m_varArgChildren.size();
-
- Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
-
- PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
- ASSERT(locations.contains(structure));
-
- m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
-
- for (PromotedHeapLocation location : locations) {
- switch (location.kind()) {
- case StructurePLoc:
- ASSERT(location == structure);
- break;
-
- case NamedPropertyPLoc: {
- ASSERT(location.base() == allocation.identifier());
- data.m_properties.append(PhantomPropertyValue(location.info()));
- Node* value = resolve(block, location);
- if (m_sinkCandidates.contains(value))
- m_graph.m_varArgChildren.append(m_bottom);
- else
- m_graph.m_varArgChildren.append(value);
- break;
- }
-
- default:
- DFG_CRASH(m_graph, node, "Bad location kind");
- }
- }
-
- node->children = AdjacencyList(
- AdjacencyList::Variable,
- firstChild, m_graph.m_varArgChildren.size() - firstChild);
- break;
- }
-
- case MaterializeCreateActivation: {
- ObjectMaterializationData& data = node->objectMaterializationData();
-
- unsigned firstChild = m_graph.m_varArgChildren.size();
-
- Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
-
- PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
- ASSERT(locations.contains(symbolTable));
- ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
- m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));
-
- PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
- ASSERT(locations.contains(scope));
- m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
-
- for (PromotedHeapLocation location : locations) {
- switch (location.kind()) {
- case ActivationScopePLoc: {
- ASSERT(location == scope);
- break;
- }
-
- case ActivationSymbolTablePLoc: {
- ASSERT(location == symbolTable);
- break;
- }
-
- case ClosureVarPLoc: {
- ASSERT(location.base() == allocation.identifier());
- data.m_properties.append(PhantomPropertyValue(location.info()));
- Node* value = resolve(block, location);
- if (m_sinkCandidates.contains(value))
- m_graph.m_varArgChildren.append(m_bottom);
- else
- m_graph.m_varArgChildren.append(value);
- break;
- }
-
- default:
- DFG_CRASH(m_graph, node, "Bad location kind");
- }
- }
-
- node->children = AdjacencyList(
- AdjacencyList::Variable,
- firstChild, m_graph.m_varArgChildren.size() - firstChild);
- break;
- }
-
- case NewFunction: {
- Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
- ASSERT(locations.size() == 2);
-
- PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
- ASSERT_UNUSED(executable, locations.contains(executable));
-
- PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
- ASSERT(locations.contains(activation));
-
- node->child1() = Edge(resolve(block, activation), KnownCellUse);
- break;
- }
-
- default:
- DFG_CRASH(m_graph, node, "Bad materialize op");
- }
- }
-
- Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where)
- {
- if (verbose)
- dataLog("Recovering ", location, " at ", where, "\n");
- ASSERT(location.base()->isPhantomAllocation());
- Node* base = getMaterialization(block, location.base());
- Node* value = resolve(block, location);
-
- if (verbose)
- dataLog("Base is ", base, " and value is ", value, "\n");
-
- if (base->isPhantomAllocation()) {
- return PromotedHeapLocation(base, location.descriptor()).createHint(
- m_graph,
- NodeOrigin(
- base->origin.semantic,
- where->origin.forExit),
- value);
- }
-
- switch (location.kind()) {
- case NamedPropertyPLoc: {
- Allocation& allocation = m_heap.getAllocation(location.base());
-
- Vector<Structure*> structures;
- structures.appendRange(allocation.structures().begin(), allocation.structures().end());
- unsigned identifierNumber = location.info();
- UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
-
- std::sort(
- structures.begin(),
- structures.end(),
- [uid] (Structure *a, Structure* b) -> bool {
- return a->getConcurrently(uid) < b->getConcurrently(uid);
- });
-
- PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
-
- if (firstOffset == structures.last()->getConcurrently(uid)) {
- Node* storage = base;
- // FIXME: When we decide to sink objects with a
- // property storage, we should handle non-inline offsets.
- RELEASE_ASSERT(isInlineOffset(firstOffset));
-
- StorageAccessData* data = m_graph.m_storageAccessData.add();
- data->offset = firstOffset;
- data->identifierNumber = identifierNumber;
-
- return m_graph.addNode(
- SpecNone,
- PutByOffset,
- where->origin,
- OpInfo(data),
- Edge(storage, KnownCellUse),
- Edge(base, KnownCellUse),
- value->defaultEdge());
- }
-
- MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
- data->identifierNumber = identifierNumber;
-
- {
- PropertyOffset currentOffset = firstOffset;
- StructureSet currentSet;
- for (Structure* structure : structures) {
- PropertyOffset offset = structure->getConcurrently(uid);
- if (offset != currentOffset) {
- data->variants.append(
- PutByIdVariant::replace(currentSet, currentOffset));
- currentOffset = offset;
- currentSet.clear();
- }
- currentSet.add(structure);
- }
- data->variants.append(PutByIdVariant::replace(currentSet, currentOffset));
- }
-
- return m_graph.addNode(
- SpecNone,
- MultiPutByOffset,
- NodeOrigin(
- base->origin.semantic,
- where->origin.forExit),
- OpInfo(data),
- Edge(base, KnownCellUse),
- value->defaultEdge());
- break;
- }
-
- case ClosureVarPLoc: {
- return m_graph.addNode(
- SpecNone,
- PutClosureVar,
- NodeOrigin(
- base->origin.semantic,
- where->origin.forExit),
- OpInfo(location.info()),
- Edge(base, KnownCellUse),
- value->defaultEdge());
- break;
- }
-
- default:
- DFG_CRASH(m_graph, base, "Bad location kind");
- break;
- }
- }
-
- SSACalculator m_pointerSSA;
- SSACalculator m_allocationSSA;
- HashSet<Node*> m_sinkCandidates;
- HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
- HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
- HashMap<PromotedHeapLocation, Node*> m_localMapping;
- HashMap<Node*, Node*> m_escapeeToMaterialization;
- InsertionSet m_insertionSet;
- CombinedLiveness m_combinedLiveness;
-
- HashMap<Node*, Node*> m_materializationToEscapee;
- HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
- HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
-
- HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
-
- BlockMap<LocalHeap> m_heapAtHead;
- BlockMap<LocalHeap> m_heapAtTail;
- LocalHeap m_heap;
-
- Node* m_bottom = nullptr;
-};
-
-}
-
-bool performObjectAllocationSinking(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
- return runPhase<ObjectAllocationSinkingPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.h b/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.h
deleted file mode 100644
index b400d4e69..000000000
--- a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGObjectAllocationSinkingPhase_h
-#define DFGObjectAllocationSinkingPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Eliminates allocations allocations that are never used except
-// locally. This will insert phantom allocations and store hints so
-// that OSR exit can materialize the objects. Replaces all uses of the
-// objects' fields with SSA data flow. This phase is able to handle cyclic allocation graphs.
-
-bool performObjectAllocationSinking(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGObjectAllocationSinkingPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.cpp b/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.cpp
deleted file mode 100644
index 3abdbe696..000000000
--- a/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGObjectMaterializationData.h"
-
-#if ENABLE(DFG_JIT)
-
-#include <wtf/ListDump.h>
-
-namespace JSC { namespace DFG {
-
-void PhantomPropertyValue::dump(PrintStream& out) const
-{
- out.print("id", m_identifierNumber);
-}
-
-void ObjectMaterializationData::dump(PrintStream& out) const
-{
- out.print("[", listDump(m_properties), "]");
-}
-
-float ObjectMaterializationData::oneWaySimilarityScore(
- const ObjectMaterializationData& other) const
-{
- unsigned numHits = 0;
- for (PhantomPropertyValue value : m_properties) {
- if (other.m_properties.contains(value))
- numHits++;
- }
- return static_cast<float>(numHits) / static_cast<float>(m_properties.size());
-}
-
-float ObjectMaterializationData::similarityScore(const ObjectMaterializationData& other) const
-{
- return std::min(oneWaySimilarityScore(other), other.oneWaySimilarityScore(*this));
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.h b/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.h
deleted file mode 100644
index 1c4febe00..000000000
--- a/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGObjectMaterializationData_h
-#define DFGObjectMaterializationData_h
-
-#if ENABLE(DFG_JIT)
-
-#include <limits.h>
-#include <wtf/MathExtras.h>
-#include <wtf/PrintStream.h>
-#include <wtf/Vector.h>
-
-namespace JSC { namespace DFG {
-
-struct PhantomPropertyValue {
- PhantomPropertyValue()
- : m_identifierNumber(UINT_MAX)
- {
- }
-
- PhantomPropertyValue(unsigned identifierNumber)
- : m_identifierNumber(identifierNumber)
- {
- }
-
- unsigned m_identifierNumber;
-
- bool operator==(const PhantomPropertyValue& other) const
- {
- return m_identifierNumber == other.m_identifierNumber;
- }
-
- void dump(PrintStream&) const;
-};
-
-struct ObjectMaterializationData {
- // Determines the meaning of the passed nodes.
- Vector<PhantomPropertyValue> m_properties;
-
- void dump(PrintStream&) const;
-
- // The fraction of my properties that the other data has.
- float oneWaySimilarityScore(const ObjectMaterializationData&) const;
-
- // The minimum of the two possible one-way scores.
- float similarityScore(const ObjectMaterializationData&) const;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGObjectMaterializationData_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index 34a097643..efe19a4f6 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,19 +26,17 @@
#include "config.h"
#include "DFGOperations.h"
+#include "Arguments.h"
#include "ButterflyInlines.h"
-#include "ClonedArguments.h"
#include "CodeBlock.h"
#include "CommonSlowPaths.h"
#include "CopiedSpaceInlines.h"
#include "DFGDriver.h"
-#include "DFGJITCode.h"
#include "DFGOSRExit.h"
#include "DFGThunks.h"
#include "DFGToFTLDeferredCompilationCallback.h"
#include "DFGToFTLForOSREntryDeferredCompilationCallback.h"
#include "DFGWorklist.h"
-#include "DirectArguments.h"
#include "FTLForOSREntryJITCode.h"
#include "FTLOSREntry.h"
#include "HostCallReturnValue.h"
@@ -46,16 +44,16 @@
#include "Interpreter.h"
#include "JIT.h"
#include "JITExceptions.h"
-#include "JSCInlines.h"
-#include "JSLexicalEnvironment.h"
+#include "JITOperationWrappers.h"
+#include "JSActivation.h"
+#include "VM.h"
+#include "JSNameScope.h"
+#include "NameInstance.h"
#include "ObjectConstructor.h"
+#include "Operations.h"
#include "Repatch.h"
-#include "ScopedArguments.h"
#include "StringConstructor.h"
-#include "Symbol.h"
-#include "TypeProfilerLog.h"
#include "TypedArrayInlines.h"
-#include "VM.h"
#include <wtf/InlineASM.h>
#if ENABLE(JIT)
@@ -68,7 +66,6 @@ static inline void putByVal(ExecState* exec, JSValue baseValue, uint32_t index,
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- ASSERT(isIndex(index));
if (direct) {
RELEASE_ASSERT(baseValue.isObject());
asObject(baseValue)->putDirectIndex(exec, index, value, 0, strict ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
@@ -81,7 +78,7 @@ static inline void putByVal(ExecState* exec, JSValue baseValue, uint32_t index,
return;
}
- object->methodTable(vm)->putByIndex(object, exec, index, value, strict);
+ object->methodTable()->putByIndex(object, exec, index, value, strict);
return;
}
@@ -99,8 +96,6 @@ ALWAYS_INLINE static void JIT_OPERATION operationPutByValInternal(ExecState* exe
JSValue value = JSValue::decode(encodedValue);
if (LIKELY(property.isUInt32())) {
- // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices.
- ASSERT(isIndex(property.asUInt32()));
putByVal<strict, direct>(exec, baseValue, property.asUInt32(), value);
return;
}
@@ -108,26 +103,32 @@ ALWAYS_INLINE static void JIT_OPERATION operationPutByValInternal(ExecState* exe
if (property.isDouble()) {
double propertyAsDouble = property.asDouble();
uint32_t propertyAsUInt32 = static_cast<uint32_t>(propertyAsDouble);
- if (propertyAsDouble == propertyAsUInt32 && isIndex(propertyAsUInt32)) {
+ if (propertyAsDouble == propertyAsUInt32) {
putByVal<strict, direct>(exec, baseValue, propertyAsUInt32, value);
return;
}
}
- // Don't put to an object if toString throws an exception.
- auto propertyName = property.toPropertyKey(exec);
- if (vm->exception())
+ if (isName(property)) {
+ PutPropertySlot slot(baseValue, strict);
+ if (direct) {
+ RELEASE_ASSERT(baseValue.isObject());
+ asObject(baseValue)->putDirect(*vm, jsCast<NameInstance*>(property.asCell())->privateName(), value, slot);
+ } else
+ baseValue.put(exec, jsCast<NameInstance*>(property.asCell())->privateName(), value, slot);
return;
+ }
- PutPropertySlot slot(baseValue, strict);
- if (direct) {
- RELEASE_ASSERT(baseValue.isObject());
- if (Optional<uint32_t> index = parseIndex(propertyName))
- asObject(baseValue)->putDirectIndex(exec, index.value(), value, 0, strict ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
- else
- asObject(baseValue)->putDirect(*vm, propertyName, value, slot);
- } else
- baseValue.put(exec, propertyName, value, slot);
+ // Don't put to an object if toString throws an exception.
+ Identifier ident(exec, property.toString(exec)->value(exec));
+ if (!vm->exception()) {
+ PutPropertySlot slot(baseValue, strict);
+ if (direct) {
+ RELEASE_ASSERT(baseValue.isObject());
+ asObject(baseValue)->putDirect(*vm, jsCast<NameInstance*>(property.asCell())->privateName(), value, slot);
+ } else
+ baseValue.put(exec, ident, value, slot);
+ }
}
template<typename ViewClass>
@@ -136,7 +137,7 @@ char* newTypedArrayWithSize(ExecState* exec, Structure* structure, int32_t size)
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
if (size < 0) {
- vm.throwException(exec, createRangeError(exec, ASCIILiteral("Requested length is negative")));
+ vm.throwException(exec, createRangeError(exec, "Requested length is negative"));
return 0;
}
return bitwise_cast<char*>(ViewClass::create(exec, structure, size));
@@ -155,7 +156,7 @@ char* newTypedArrayWithOneArgument(
RefPtr<ArrayBuffer> buffer = jsBuffer->impl();
if (buffer->byteLength() % ViewClass::elementSize) {
- vm.throwException(exec, createRangeError(exec, ASCIILiteral("ArrayBuffer length minus the byteOffset is not a multiple of the element size")));
+ vm.throwException(exec, createRangeError(exec, "ArrayBuffer length minus the byteOffset is not a multiple of the element size"));
return 0;
}
return bitwise_cast<char*>(
@@ -182,18 +183,18 @@ char* newTypedArrayWithOneArgument(
if (value.isInt32())
length = value.asInt32();
else if (!value.isNumber()) {
- vm.throwException(exec, createTypeError(exec, ASCIILiteral("Invalid array length argument")));
+ vm.throwException(exec, createTypeError(exec, "Invalid array length argument"));
return 0;
} else {
length = static_cast<int>(value.asNumber());
if (length != value.asNumber()) {
- vm.throwException(exec, createTypeError(exec, ASCIILiteral("Invalid array length argument (fractional lengths not allowed)")));
+ vm.throwException(exec, createTypeError(exec, "Invalid array length argument (fractional lengths not allowed)"));
return 0;
}
}
if (length < 0) {
- vm.throwException(exec, createRangeError(exec, ASCIILiteral("Requested length is negative")));
+ vm.throwException(exec, createRangeError(exec, "Requested length is negative"));
return 0;
}
@@ -220,15 +221,15 @@ EncodedJSValue JIT_OPERATION operationToThisStrict(ExecState* exec, EncodedJSVal
JSCell* JIT_OPERATION operationCreateThis(ExecState* exec, JSObject* constructor, int32_t inlineCapacity)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
#if !ASSERT_DISABLED
ConstructData constructData;
- ASSERT(jsCast<JSFunction*>(constructor)->methodTable(vm)->getConstructData(jsCast<JSFunction*>(constructor), constructData) == ConstructTypeJS);
+ ASSERT(jsCast<JSFunction*>(constructor)->methodTable()->getConstructData(jsCast<JSFunction*>(constructor), constructData) == ConstructTypeJS);
#endif
- return constructEmptyObject(exec, jsCast<JSFunction*>(constructor)->rareData(exec, inlineCapacity)->allocationProfile()->structure());
+ return constructEmptyObject(exec, jsCast<JSFunction*>(constructor)->allocationProfile(exec, inlineCapacity)->structure());
}
EncodedJSValue JIT_OPERATION operationValueAdd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
@@ -258,7 +259,7 @@ EncodedJSValue JIT_OPERATION operationValueAddNotNumber(ExecState* exec, Encoded
return JSValue::encode(jsAddSlowCase(exec, op1, op2));
}
-static ALWAYS_INLINE EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t index)
+static inline EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t index)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
@@ -277,8 +278,8 @@ static ALWAYS_INLINE EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint
EncodedJSValue JIT_OPERATION operationGetByVal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
JSValue baseValue = JSValue::decode(encodedBase);
JSValue property = JSValue::decode(encodedProperty);
@@ -291,32 +292,25 @@ EncodedJSValue JIT_OPERATION operationGetByVal(ExecState* exec, EncodedJSValue e
} else if (property.isDouble()) {
double propertyAsDouble = property.asDouble();
uint32_t propertyAsUInt32 = static_cast<uint32_t>(propertyAsDouble);
- if (propertyAsUInt32 == propertyAsDouble && isIndex(propertyAsUInt32))
+ if (propertyAsUInt32 == propertyAsDouble)
return getByVal(exec, base, propertyAsUInt32);
} else if (property.isString()) {
- Structure& structure = *base->structure(vm);
- if (JSCell::canUseFastGetOwnProperty(structure)) {
- if (RefPtr<AtomicStringImpl> existingAtomicString = asString(property)->toExistingAtomicString(exec)) {
- if (JSValue result = base->fastGetOwnProperty(vm, structure, existingAtomicString.get()))
- return JSValue::encode(result);
- }
- }
+ if (JSValue result = base->fastGetOwnProperty(exec, asString(property)->value(exec)))
+ return JSValue::encode(result);
}
}
- baseValue.requireObjectCoercible(exec);
- if (exec->hadException())
- return JSValue::encode(jsUndefined());
- auto propertyName = property.toPropertyKey(exec);
- if (exec->hadException())
- return JSValue::encode(jsUndefined());
- return JSValue::encode(baseValue.get(exec, propertyName));
+ if (isName(property))
+ return JSValue::encode(baseValue.get(exec, jsCast<NameInstance*>(property.asCell())->privateName()));
+
+ Identifier ident(exec, property.toString(exec)->value(exec));
+ return JSValue::encode(baseValue.get(exec, ident));
}
EncodedJSValue JIT_OPERATION operationGetByValCell(ExecState* exec, JSCell* base, EncodedJSValue encodedProperty)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
JSValue property = JSValue::decode(encodedProperty);
@@ -328,19 +322,15 @@ EncodedJSValue JIT_OPERATION operationGetByValCell(ExecState* exec, JSCell* base
if (propertyAsUInt32 == propertyAsDouble)
return getByVal(exec, base, propertyAsUInt32);
} else if (property.isString()) {
- Structure& structure = *base->structure(vm);
- if (JSCell::canUseFastGetOwnProperty(structure)) {
- if (RefPtr<AtomicStringImpl> existingAtomicString = asString(property)->toExistingAtomicString(exec)) {
- if (JSValue result = base->fastGetOwnProperty(vm, structure, existingAtomicString.get()))
- return JSValue::encode(result);
- }
- }
+ if (JSValue result = base->fastGetOwnProperty(exec, asString(property)->value(exec)))
+ return JSValue::encode(result);
}
- auto propertyName = property.toPropertyKey(exec);
- if (exec->hadException())
- return JSValue::encode(jsUndefined());
- return JSValue::encode(JSValue(base).get(exec, propertyName));
+ if (isName(property))
+ return JSValue::encode(JSValue(base).get(exec, jsCast<NameInstance*>(property.asCell())->privateName()));
+
+ Identifier ident(exec, property.toString(exec)->value(exec));
+ return JSValue::encode(JSValue(base).get(exec, ident));
}
ALWAYS_INLINE EncodedJSValue getByValCellInt(ExecState* exec, JSCell* base, int32_t index)
@@ -401,8 +391,8 @@ void JIT_OPERATION operationPutByValCellNonStrict(ExecState* exec, JSCell* cell,
void JIT_OPERATION operationPutByValBeyondArrayBoundsStrict(ExecState* exec, JSObject* array, int32_t index, EncodedJSValue encodedValue)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
if (index >= 0) {
array->putByIndexInline(exec, index, JSValue::decode(encodedValue), true);
@@ -761,184 +751,79 @@ char* JIT_OPERATION operationNewFloat64ArrayWithOneArgument(
return newTypedArrayWithOneArgument<JSFloat64Array>(exec, structure, encodedValue);
}
-JSCell* JIT_OPERATION operationCreateActivationDirect(ExecState* exec, Structure* structure, JSScope* scope, SymbolTable* table, EncodedJSValue initialValueEncoded)
+JSCell* JIT_OPERATION operationCreateInlinedArguments(
+ ExecState* exec, InlineCallFrame* inlineCallFrame)
{
- JSValue initialValue = JSValue::decode(initialValueEncoded);
- ASSERT(initialValue == jsUndefined() || initialValue == jsTDZValue());
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- return JSLexicalEnvironment::create(vm, structure, scope, table, initialValue);
-}
-
-JSCell* JIT_OPERATION operationCreateDirectArguments(ExecState* exec, Structure* structure, int32_t length, int32_t minCapacity)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer target(&vm, exec);
- DirectArguments* result = DirectArguments::create(
- vm, structure, length, std::max(length, minCapacity));
- // The caller will store to this object without barriers. Most likely, at this point, this is
- // still a young object and so no barriers are needed. But it's good to be careful anyway,
- // since the GC should be allowed to do crazy (like pretenuring, for example).
- vm.heap.writeBarrier(result);
+ // NB: This needs to be exceedingly careful with top call frame tracking, since it
+ // may be called from OSR exit, while the state of the call stack is bizarre.
+ Arguments* result = Arguments::create(vm, exec, inlineCallFrame);
+ ASSERT(!vm.exception());
return result;
}
-JSCell* JIT_OPERATION operationCreateScopedArguments(ExecState* exec, Structure* structure, Register* argumentStart, int32_t length, JSFunction* callee, JSLexicalEnvironment* scope)
+void JIT_OPERATION operationTearOffInlinedArguments(
+ ExecState* exec, JSCell* argumentsCell, JSCell* activationCell, InlineCallFrame* inlineCallFrame)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer target(&vm, exec);
-
- // We could pass the ScopedArgumentsTable* as an argument. We currently don't because I
- // didn't feel like changing the max number of arguments for a slow path call from 6 to 7.
- ScopedArgumentsTable* table = scope->symbolTable()->arguments();
-
- return ScopedArguments::createByCopyingFrom(
- vm, structure, argumentStart, length, callee, table, scope);
-}
-
-JSCell* JIT_OPERATION operationCreateClonedArguments(ExecState* exec, Structure* structure, Register* argumentStart, int32_t length, JSFunction* callee)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer target(&vm, exec);
- return ClonedArguments::createByCopyingFrom(
- exec, structure, argumentStart, length, callee);
+ ASSERT_UNUSED(activationCell, !activationCell); // Currently, we don't inline functions with activations.
+ jsCast<Arguments*>(argumentsCell)->tearOff(exec, inlineCallFrame);
}
-JSCell* JIT_OPERATION operationCreateDirectArgumentsDuringExit(ExecState* exec, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
+EncodedJSValue JIT_OPERATION operationGetArgumentByVal(ExecState* exec, int32_t argumentsRegister, int32_t index)
{
VM& vm = exec->vm();
- NativeCallFrameTracer target(&vm, exec);
-
- DeferGCForAWhile deferGC(vm.heap);
-
- CodeBlock* codeBlock;
- if (inlineCallFrame)
- codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
- else
- codeBlock = exec->codeBlock();
-
- unsigned length = argumentCount - 1;
- unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
- DirectArguments* result = DirectArguments::create(
- vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);
-
- result->callee().set(vm, result, callee);
-
- Register* arguments =
- exec->registers() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0) +
- CallFrame::argumentOffset(0);
- for (unsigned i = length; i--;)
- result->setIndexQuickly(vm, i, arguments[i].jsValue());
-
- return result;
-}
+ NativeCallFrameTracer tracer(&vm, exec);
-JSCell* JIT_OPERATION operationCreateClonedArgumentsDuringExit(ExecState* exec, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer target(&vm, exec);
-
- DeferGCForAWhile deferGC(vm.heap);
+ JSValue argumentsValue = exec->uncheckedR(argumentsRegister).jsValue();
- CodeBlock* codeBlock;
- if (inlineCallFrame)
- codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
- else
- codeBlock = exec->codeBlock();
-
- unsigned length = argumentCount - 1;
- ClonedArguments* result = ClonedArguments::createEmpty(
- vm, codeBlock->globalObject()->outOfBandArgumentsStructure(), callee);
-
- Register* arguments =
- exec->registers() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0) +
- CallFrame::argumentOffset(0);
- for (unsigned i = length; i--;)
- result->putDirectIndex(exec, i, arguments[i].jsValue());
-
- result->putDirect(vm, vm.propertyNames->length, jsNumber(length));
+ // If there are no arguments, and we're accessing out of bounds, then we have to create the
+ // arguments in case someone has installed a getter on a numeric property.
+ if (!argumentsValue)
+ exec->uncheckedR(argumentsRegister) = argumentsValue = Arguments::create(exec->vm(), exec);
- return result;
+ return JSValue::encode(argumentsValue.get(exec, index));
}
-size_t JIT_OPERATION operationObjectIsObject(ExecState* exec, JSGlobalObject* globalObject, JSCell* object)
+EncodedJSValue JIT_OPERATION operationGetInlinedArgumentByVal(
+ ExecState* exec, int32_t argumentsRegister, InlineCallFrame* inlineCallFrame, int32_t index)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- ASSERT(jsDynamicCast<JSObject*>(object));
+ JSValue argumentsValue = exec->uncheckedR(argumentsRegister).jsValue();
- if (object->structure(vm)->masqueradesAsUndefined(globalObject))
- return false;
- if (object->type() == JSFunctionType)
- return false;
- if (object->inlineTypeFlags() & TypeOfShouldCallGetCallData) {
- CallData callData;
- if (object->methodTable(vm)->getCallData(object, callData) != CallTypeNone)
- return false;
+ // If there are no arguments, and we're accessing out of bounds, then we have to create the
+ // arguments in case someone has installed a getter on a numeric property.
+ if (!argumentsValue) {
+ exec->uncheckedR(argumentsRegister) = argumentsValue =
+ Arguments::create(exec->vm(), exec, inlineCallFrame);
}
- return true;
+ return JSValue::encode(argumentsValue.get(exec, index));
}
-size_t JIT_OPERATION operationObjectIsFunction(ExecState* exec, JSGlobalObject* globalObject, JSCell* object)
+JSCell* JIT_OPERATION operationNewFunctionNoCheck(ExecState* exec, JSCell* functionExecutable)
{
+ ASSERT(functionExecutable->inherits(FunctionExecutable::info()));
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
-
- ASSERT(jsDynamicCast<JSObject*>(object));
-
- if (object->structure(vm)->masqueradesAsUndefined(globalObject))
- return false;
- if (object->type() == JSFunctionType)
- return true;
- if (object->inlineTypeFlags() & TypeOfShouldCallGetCallData) {
- CallData callData;
- if (object->methodTable(vm)->getCallData(object, callData) != CallTypeNone)
- return true;
- }
-
- return false;
+ return JSFunction::create(vm, static_cast<FunctionExecutable*>(functionExecutable), exec->scope());
}
-JSCell* JIT_OPERATION operationTypeOfObject(ExecState* exec, JSGlobalObject* globalObject, JSCell* object)
+size_t JIT_OPERATION operationIsObject(ExecState* exec, EncodedJSValue value)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- ASSERT(jsDynamicCast<JSObject*>(object));
-
- if (object->structure(vm)->masqueradesAsUndefined(globalObject))
- return vm.smallStrings.undefinedString();
- if (object->type() == JSFunctionType)
- return vm.smallStrings.functionString();
- if (object->inlineTypeFlags() & TypeOfShouldCallGetCallData) {
- CallData callData;
- if (object->methodTable(vm)->getCallData(object, callData) != CallTypeNone)
- return vm.smallStrings.functionString();
- }
-
- return vm.smallStrings.objectString();
+ return jsIsObjectType(exec, JSValue::decode(value));
}
-int32_t JIT_OPERATION operationTypeOfObjectAsTypeofType(ExecState* exec, JSGlobalObject* globalObject, JSCell* object)
+size_t JIT_OPERATION operationIsFunction(EncodedJSValue value)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
+ return jsIsFunctionType(JSValue::decode(value));
+}
- ASSERT(jsDynamicCast<JSObject*>(object));
-
- if (object->structure(vm)->masqueradesAsUndefined(globalObject))
- return static_cast<int32_t>(TypeofType::Undefined);
- if (object->type() == JSFunctionType)
- return static_cast<int32_t>(TypeofType::Function);
- if (object->inlineTypeFlags() & TypeOfShouldCallGetCallData) {
- CallData callData;
- if (object->methodTable(vm)->getCallData(object, callData) != CallTypeNone)
- return static_cast<int32_t>(TypeofType::Function);
- }
-
- return static_cast<int32_t>(TypeofType::Object);
+JSCell* JIT_OPERATION operationTypeOf(ExecState* exec, JSCell* value)
+{
+ return jsTypeStringForValue(exec, JSValue(value)).asCell();
}
char* JIT_OPERATION operationAllocatePropertyStorageWithInitialCapacity(ExecState* exec)
@@ -1015,6 +900,17 @@ char* JIT_OPERATION operationEnsureContiguous(ExecState* exec, JSCell* cell)
return reinterpret_cast<char*>(asObject(cell)->ensureContiguous(vm).data());
}
+char* JIT_OPERATION operationRageEnsureContiguous(ExecState* exec, JSCell* cell)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ if (!cell->isObject())
+ return 0;
+
+ return reinterpret_cast<char*>(asObject(cell)->rageEnsureContiguous(vm).data());
+}
+
char* JIT_OPERATION operationEnsureArrayStorage(ExecState* exec, JSCell* cell)
{
VM& vm = exec->vm();
@@ -1066,22 +962,6 @@ JSCell* JIT_OPERATION operationToString(ExecState* exec, EncodedJSValue value)
return JSValue::decode(value).toString(exec);
}
-JSCell* JIT_OPERATION operationCallStringConstructorOnCell(ExecState* exec, JSCell* cell)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- return stringConstructor(exec, cell);
-}
-
-JSCell* JIT_OPERATION operationCallStringConstructor(ExecState* exec, EncodedJSValue value)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- return stringConstructor(exec, JSValue::decode(value));
-}
-
JSCell* JIT_OPERATION operationMakeRope2(ExecState* exec, JSString* left, JSString* right)
{
VM& vm = exec->vm();
@@ -1130,48 +1010,12 @@ char* JIT_OPERATION operationSwitchString(ExecState* exec, size_t tableIndex, JS
return static_cast<char*>(exec->codeBlock()->stringSwitchJumpTable(tableIndex).ctiForValue(string->value(exec).impl()).executableAddress());
}
-int32_t JIT_OPERATION operationSwitchStringAndGetBranchOffset(ExecState* exec, size_t tableIndex, JSString* string)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- return exec->codeBlock()->stringSwitchJumpTable(tableIndex).offsetForValue(string->value(exec).impl(), std::numeric_limits<int32_t>::min());
-}
-
-void JIT_OPERATION operationNotifyWrite(ExecState* exec, WatchpointSet* set)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- set->touch("Executed NotifyWrite");
-}
-
-void JIT_OPERATION operationThrowStackOverflowForVarargs(ExecState* exec)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- throwStackOverflowError(exec);
-}
-
-int32_t JIT_OPERATION operationSizeOfVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t firstVarArgOffset)
+void JIT_OPERATION operationInvalidate(ExecState* exec, VariableWatchpointSet* set)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- JSValue arguments = JSValue::decode(encodedArguments);
-
- return sizeOfVarargs(exec, arguments, firstVarArgOffset);
-}
-void JIT_OPERATION operationLoadVarargs(ExecState* exec, int32_t firstElementDest, EncodedJSValue encodedArguments, int32_t offset, int32_t length, int32_t mandatoryMinimum)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- JSValue arguments = JSValue::decode(encodedArguments);
-
- loadVarargs(exec, VirtualRegister(firstElementDest), arguments, offset, length);
-
- for (int32_t i = length; i < mandatoryMinimum; ++i)
- exec->r(firstElementDest + i) = jsUndefined();
+ set->invalidate();
}
double JIT_OPERATION operationFModOnInts(int32_t a, int32_t b)
@@ -1186,24 +1030,6 @@ JSCell* JIT_OPERATION operationStringFromCharCode(ExecState* exec, int32_t op1)
return JSC::stringFromCharCode(exec, op1);
}
-int64_t JIT_OPERATION operationConvertBoxedDoubleToInt52(EncodedJSValue encodedValue)
-{
- JSValue value = JSValue::decode(encodedValue);
- if (!value.isDouble())
- return JSValue::notInt52;
- return tryConvertToInt52(value.asDouble());
-}
-
-int64_t JIT_OPERATION operationConvertDoubleToInt52(double value)
-{
- return tryConvertToInt52(value);
-}
-
-void JIT_OPERATION operationProcessTypeProfilerLogDFG(ExecState* exec)
-{
- exec->vm().typeProfilerLog()->processLogEntries(ASCIILiteral("Log Full, called from inside DFG."));
-}
-
size_t JIT_OPERATION dfgConvertJSValueToInt32(ExecState* exec, EncodedJSValue value)
{
VM* vm = &exec->vm();
@@ -1221,8 +1047,8 @@ void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void*
SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
CodeBlock* codeBlock = debugInfo->codeBlock;
CodeBlock* alternative = codeBlock->alternative();
- dataLog("Speculation failure in ", *codeBlock);
- dataLog(" @ exit #", vm->osrExitIndex, " (bc#", debugInfo->bytecodeOffset, ", ", exitKindToString(debugInfo->kind), ") with ");
+ dataLog(
+ "Speculation failure in ", *codeBlock, " with ");
if (alternative) {
dataLog(
"executeCounter = ", alternative->jitExecuteCounter(),
@@ -1251,7 +1077,7 @@ void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void*
dataLog("\n");
}
-extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock, OSRExitBase* exit)
+extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock)
{
// It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
// really be profitable.
@@ -1275,21 +1101,13 @@ extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock, OSR
ASSERT(codeBlock->hasOptimizedReplacement());
CodeBlock* optimizedCodeBlock = codeBlock->replacement();
ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType()));
-
- bool didTryToEnterIntoInlinedLoops = false;
- for (InlineCallFrame* inlineCallFrame = exit->m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
- if (inlineCallFrame->executable->didTryToEnterInLoop()) {
- didTryToEnterIntoInlinedLoops = true;
- break;
- }
- }
// In order to trigger reoptimization, one of two things must have happened:
// 1) We exited more than some number of times.
// 2) We exited and got stuck in a loop, and now we're exiting again.
bool didExitABunch = optimizedCodeBlock->shouldReoptimizeNow();
bool didGetStuckInLoop =
- (codeBlock->checkIfOptimizationThresholdReached() || didTryToEnterIntoInlinedLoops)
+ codeBlock->checkIfOptimizationThresholdReached()
&& optimizedCodeBlock->shouldReoptimizeFromLoopNow();
if (!didExitABunch && !didGetStuckInLoop) {
@@ -1299,12 +1117,25 @@ extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock, OSR
return;
}
- optimizedCodeBlock->jettison(Profiler::JettisonDueToOSRExit, CountReoptimization);
+ optimizedCodeBlock->jettison(CountReoptimization);
}
#if ENABLE(FTL_JIT)
-static void triggerFTLReplacementCompile(VM* vm, CodeBlock* codeBlock, JITCode* jitCode)
+void JIT_OPERATION triggerTierUpNow(ExecState* exec)
{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ DeferGC deferGC(vm->heap);
+ CodeBlock* codeBlock = exec->codeBlock();
+
+ JITCode* jitCode = codeBlock->jitCode()->dfg();
+
+ if (Options::verboseOSR()) {
+ dataLog(
+ *codeBlock, ": Entered triggerTierUpNow with executeCounter = ",
+ jitCode->tierUpCounter, "\n");
+ }
+
if (codeBlock->baselineVersion()->m_didFailFTLCompilation) {
if (Options::verboseOSR())
dataLog("Deferring FTL-optimization of ", *codeBlock, " indefinitely because there was an FTL failure.\n");
@@ -1319,7 +1150,7 @@ static void triggerFTLReplacementCompile(VM* vm, CodeBlock* codeBlock, JITCode*
}
Worklist::State worklistState;
- if (Worklist* worklist = existingGlobalFTLWorklistOrNull()) {
+ if (Worklist* worklist = vm->worklist.get()) {
worklistState = worklist->completeAllReadyPlansForVM(
*vm, CompilationKey(codeBlock->baselineVersion(), FTLMode));
} else
@@ -1348,43 +1179,8 @@ static void triggerFTLReplacementCompile(VM* vm, CodeBlock* codeBlock, JITCode*
// We need to compile the code.
compile(
- *vm, codeBlock->newReplacement().get(), codeBlock, FTLMode, UINT_MAX,
- Operands<JSValue>(), ToFTLDeferredCompilationCallback::create(codeBlock));
-}
-
-static void triggerTierUpNowCommon(ExecState* exec, bool inLoop)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
- DeferGC deferGC(vm->heap);
- CodeBlock* codeBlock = exec->codeBlock();
-
- if (codeBlock->jitType() != JITCode::DFGJIT) {
- dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- JITCode* jitCode = codeBlock->jitCode()->dfg();
-
- if (Options::verboseOSR()) {
- dataLog(
- *codeBlock, ": Entered triggerTierUpNow with executeCounter = ",
- jitCode->tierUpCounter, "\n");
- }
- if (inLoop)
- jitCode->nestedTriggerIsSet = 1;
-
- triggerFTLReplacementCompile(vm, codeBlock, jitCode);
-}
-
-void JIT_OPERATION triggerTierUpNow(ExecState* exec)
-{
- triggerTierUpNowCommon(exec, false);
-}
-
-void JIT_OPERATION triggerTierUpNowInLoop(ExecState* exec)
-{
- triggerTierUpNowCommon(exec, true);
+ *vm, codeBlock->newReplacement().get(), FTLMode, UINT_MAX, Operands<JSValue>(),
+ ToFTLDeferredCompilationCallback::create(codeBlock), vm->ensureWorklist());
}
char* JIT_OPERATION triggerOSREntryNow(
@@ -1395,61 +1191,63 @@ char* JIT_OPERATION triggerOSREntryNow(
DeferGC deferGC(vm->heap);
CodeBlock* codeBlock = exec->codeBlock();
- if (codeBlock->jitType() != JITCode::DFGJIT) {
- dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
-
JITCode* jitCode = codeBlock->jitCode()->dfg();
- jitCode->nestedTriggerIsSet = 0;
if (Options::verboseOSR()) {
dataLog(
- *codeBlock, ": Entered triggerOSREntryNow with executeCounter = ",
+ *codeBlock, ": Entered triggerTierUpNow with executeCounter = ",
jitCode->tierUpCounter, "\n");
}
- // - If we don't have an FTL code block, then try to compile one.
- // - If we do have an FTL code block, then try to enter for a while.
- // - If we couldn't enter for a while, then trigger OSR entry.
-
- triggerFTLReplacementCompile(vm, codeBlock, jitCode);
-
- if (!codeBlock->hasOptimizedReplacement())
+ if (codeBlock->baselineVersion()->m_didFailFTLCompilation) {
+ if (Options::verboseOSR())
+ dataLog("Deferring FTL-optimization of ", *codeBlock, " indefinitely because there was an FTL failure.\n");
+ jitCode->dontOptimizeAnytimeSoon(codeBlock);
return 0;
+ }
- if (jitCode->osrEntryRetry < Options::ftlOSREntryRetryThreshold()) {
- jitCode->osrEntryRetry++;
+ if (!jitCode->checkIfOptimizationThresholdReached(codeBlock)) {
+ if (Options::verboseOSR())
+ dataLog("Choosing not to FTL-optimize ", *codeBlock, " yet.\n");
return 0;
}
- // It's time to try to compile code for OSR entry.
Worklist::State worklistState;
- if (Worklist* worklist = existingGlobalFTLWorklistOrNull()) {
+ if (Worklist* worklist = vm->worklist.get()) {
worklistState = worklist->completeAllReadyPlansForVM(
*vm, CompilationKey(codeBlock->baselineVersion(), FTLForOSREntryMode));
} else
worklistState = Worklist::NotKnown;
- if (worklistState == Worklist::Compiling)
+ if (worklistState == Worklist::Compiling) {
+ ASSERT(!jitCode->osrEntryBlock);
+ jitCode->setOptimizationThresholdBasedOnCompilationResult(
+ codeBlock, CompilationDeferred);
return 0;
+ }
if (CodeBlock* entryBlock = jitCode->osrEntryBlock.get()) {
void* address = FTL::prepareOSREntry(
exec, codeBlock, entryBlock, bytecodeIndex, streamIndex);
- if (address)
+ if (address) {
+ jitCode->optimizeSoon(codeBlock);
return static_cast<char*>(address);
+ }
FTL::ForOSREntryJITCode* entryCode = entryBlock->jitCode()->ftlForOSREntry();
entryCode->countEntryFailure();
if (entryCode->entryFailureCount() <
- Options::ftlOSREntryFailureCountForReoptimization())
+ Options::ftlOSREntryFailureCountForReoptimization()) {
+
+ jitCode->optimizeSoon(codeBlock);
return 0;
+ }
// OSR entry failed. Oh no! This implies that we need to retry. We retry
// without exponential backoff and we only do this for the entry code block.
- jitCode->osrEntryBlock = nullptr;
- jitCode->osrEntryRetry = 0;
+ jitCode->osrEntryBlock.clear();
+
+ jitCode->optimizeAfterWarmUp(codeBlock);
return 0;
}
@@ -1460,28 +1258,87 @@ char* JIT_OPERATION triggerOSREntryNow(
return 0;
}
- // We aren't compiling and haven't compiled anything for OSR entry. So, try to compile
- // something.
+ // The first order of business is to trigger a for-entry compile.
Operands<JSValue> mustHandleValues;
jitCode->reconstruct(
exec, codeBlock, CodeOrigin(bytecodeIndex), streamIndex, mustHandleValues);
- RefPtr<CodeBlock> replacementCodeBlock = codeBlock->newReplacement();
- CompilationResult forEntryResult = compile(
- *vm, replacementCodeBlock.get(), codeBlock, FTLForOSREntryMode, bytecodeIndex,
- mustHandleValues, ToFTLForOSREntryDeferredCompilationCallback::create(codeBlock));
+ CompilationResult forEntryResult = DFG::compile(
+ *vm, codeBlock->newReplacement().get(), FTLForOSREntryMode, bytecodeIndex,
+ mustHandleValues, ToFTLForOSREntryDeferredCompilationCallback::create(codeBlock),
+ vm->ensureWorklist());
+
+ // But we also want to trigger a replacement compile. Of course, we don't want to
+ // trigger it if we don't need to. Note that this is kind of weird because we might
+ // have just finished an FTL compile and that compile failed or was invalidated.
+ // But this seems uncommon enough that we sort of don't care. It's certainly sound
+ // to fire off another compile right now so long as we're not already compiling and
+ // we don't already have an optimized replacement. Note, we don't do this for
+ // obviously bad cases like global code, where we know that there is a slim chance
+ // of this code being invoked ever again.
+ CompilationKey keyForReplacement(codeBlock->baselineVersion(), FTLMode);
+ if (codeBlock->codeType() != GlobalCode
+ && !codeBlock->hasOptimizedReplacement()
+ && (!vm->worklist.get()
+ || vm->worklist->compilationState(keyForReplacement) == Worklist::NotKnown)) {
+ compile(
+ *vm, codeBlock->newReplacement().get(), FTLMode, UINT_MAX, Operands<JSValue>(),
+ ToFTLDeferredCompilationCallback::create(codeBlock), vm->ensureWorklist());
+ }
- if (forEntryResult != CompilationSuccessful) {
- ASSERT(forEntryResult == CompilationDeferred || replacementCodeBlock->hasOneRef());
+ if (forEntryResult != CompilationSuccessful)
return 0;
- }
-
+
// It's possible that the for-entry compile already succeeded. In that case OSR
// entry will succeed unless we ran out of stack. It's not clear what we should do.
// We signal to try again after a while if that happens.
void* address = FTL::prepareOSREntry(
exec, codeBlock, jitCode->osrEntryBlock.get(), bytecodeIndex, streamIndex);
+ if (address)
+ jitCode->optimizeSoon(codeBlock);
+ else
+ jitCode->optimizeAfterWarmUp(codeBlock);
return static_cast<char*>(address);
}
+
+// FIXME: Make calls work well. Currently they're a pure regression.
+// https://bugs.webkit.org/show_bug.cgi?id=113621
+EncodedJSValue JIT_OPERATION operationFTLCall(ExecState* exec)
+{
+ ExecState* callerExec = exec->callerFrame();
+
+ VM* vm = &callerExec->vm();
+ NativeCallFrameTracer tracer(vm, callerExec);
+
+ JSValue callee = exec->calleeAsValue();
+ CallData callData;
+ CallType callType = getCallData(callee, callData);
+ if (callType == CallTypeNone) {
+ vm->throwException(callerExec, createNotAFunctionError(callerExec, callee));
+ return JSValue::encode(jsUndefined());
+ }
+
+ return JSValue::encode(call(callerExec, callee, callType, callData, exec->thisValue(), exec));
+}
+
+// FIXME: Make calls work well. Currently they're a pure regression.
+// https://bugs.webkit.org/show_bug.cgi?id=113621
+EncodedJSValue JIT_OPERATION operationFTLConstruct(ExecState* exec)
+{
+ ExecState* callerExec = exec->callerFrame();
+
+ VM* vm = &callerExec->vm();
+ NativeCallFrameTracer tracer(vm, callerExec);
+
+ JSValue callee = exec->calleeAsValue();
+ ConstructData constructData;
+ ConstructType constructType = getConstructData(callee, constructData);
+ if (constructType == ConstructTypeNone) {
+ vm->throwException(callerExec, createNotAFunctionError(callerExec, callee));
+ return JSValue::encode(jsUndefined());
+ }
+
+ return JSValue::encode(construct(callerExec, callee, constructType, constructData, exec));
+}
#endif // ENABLE(FTL_JIT)
} // extern "C"
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index 55b401529..996bbedab 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,12 +28,13 @@
#if ENABLE(DFG_JIT)
+#include "DFGJITCompiler.h"
#include "JITOperations.h"
#include "PutKind.h"
-namespace JSC { namespace DFG {
+namespace JSC {
-struct OSRExitBase;
+namespace DFG {
extern "C" {
@@ -96,17 +97,15 @@ EncodedJSValue JIT_OPERATION operationRegExpExec(ExecState*, JSCell*, JSCell*) W
size_t JIT_OPERATION operationRegExpTest(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareStrictEqCell(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareStrictEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
-JSCell* JIT_OPERATION operationCreateActivationDirect(ExecState*, Structure*, JSScope*, SymbolTable*, EncodedJSValue);
-JSCell* JIT_OPERATION operationCreateDirectArguments(ExecState*, Structure*, int32_t length, int32_t minCapacity);
-JSCell* JIT_OPERATION operationCreateDirectArgumentsDuringExit(ExecState*, InlineCallFrame*, JSFunction*, int32_t argumentCount);
-JSCell* JIT_OPERATION operationCreateScopedArguments(ExecState*, Structure*, Register* argumentStart, int32_t length, JSFunction* callee, JSLexicalEnvironment*);
-JSCell* JIT_OPERATION operationCreateClonedArgumentsDuringExit(ExecState*, InlineCallFrame*, JSFunction*, int32_t argumentCount);
-JSCell* JIT_OPERATION operationCreateClonedArguments(ExecState*, Structure*, Register* argumentStart, int32_t length, JSFunction* callee);
+JSCell* JIT_OPERATION operationCreateInlinedArguments(ExecState*, InlineCallFrame*) WTF_INTERNAL;
+void JIT_OPERATION operationTearOffInlinedArguments(ExecState*, JSCell*, JSCell*, InlineCallFrame*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetInlinedArgumentByVal(ExecState*, int32_t, InlineCallFrame*, int32_t) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetArgumentByVal(ExecState*, int32_t, int32_t) WTF_INTERNAL;
+JSCell* JIT_OPERATION operationNewFunctionNoCheck(ExecState*, JSCell*) WTF_INTERNAL;
double JIT_OPERATION operationFModOnInts(int32_t, int32_t) WTF_INTERNAL;
-size_t JIT_OPERATION operationObjectIsObject(ExecState*, JSGlobalObject*, JSCell*) WTF_INTERNAL;
-size_t JIT_OPERATION operationObjectIsFunction(ExecState*, JSGlobalObject*, JSCell*) WTF_INTERNAL;
-JSCell* JIT_OPERATION operationTypeOfObject(ExecState*, JSGlobalObject*, JSCell*) WTF_INTERNAL;
-int32_t JIT_OPERATION operationTypeOfObjectAsTypeofType(ExecState*, JSGlobalObject*, JSCell*) WTF_INTERNAL;
+size_t JIT_OPERATION operationIsObject(ExecState*, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationIsFunction(EncodedJSValue) WTF_INTERNAL;
+JSCell* JIT_OPERATION operationTypeOf(ExecState*, JSCell*) WTF_INTERNAL;
char* JIT_OPERATION operationAllocatePropertyStorageWithInitialCapacity(ExecState*) WTF_INTERNAL;
char* JIT_OPERATION operationAllocatePropertyStorage(ExecState*, size_t newSize) WTF_INTERNAL;
char* JIT_OPERATION operationReallocateButterflyToHavePropertyStorageWithInitialCapacity(ExecState*, JSObject*) WTF_INTERNAL;
@@ -114,6 +113,7 @@ char* JIT_OPERATION operationReallocateButterflyToGrowPropertyStorage(ExecState*
char* JIT_OPERATION operationEnsureInt32(ExecState*, JSCell*);
char* JIT_OPERATION operationEnsureDouble(ExecState*, JSCell*);
char* JIT_OPERATION operationEnsureContiguous(ExecState*, JSCell*);
+char* JIT_OPERATION operationRageEnsureContiguous(ExecState*, JSCell*);
char* JIT_OPERATION operationEnsureArrayStorage(ExecState*, JSCell*);
StringImpl* JIT_OPERATION operationResolveRope(ExecState*, JSString*);
JSString* JIT_OPERATION operationSingleCharacterString(ExecState*, int32_t);
@@ -121,22 +121,18 @@ JSString* JIT_OPERATION operationSingleCharacterString(ExecState*, int32_t);
JSCell* JIT_OPERATION operationNewStringObject(ExecState*, JSString*, Structure*);
JSCell* JIT_OPERATION operationToStringOnCell(ExecState*, JSCell*);
JSCell* JIT_OPERATION operationToString(ExecState*, EncodedJSValue);
-JSCell* JIT_OPERATION operationCallStringConstructorOnCell(ExecState*, JSCell*);
-JSCell* JIT_OPERATION operationCallStringConstructor(ExecState*, EncodedJSValue);
JSCell* JIT_OPERATION operationMakeRope2(ExecState*, JSString*, JSString*);
JSCell* JIT_OPERATION operationMakeRope3(ExecState*, JSString*, JSString*, JSString*);
char* JIT_OPERATION operationFindSwitchImmTargetForDouble(ExecState*, EncodedJSValue, size_t tableIndex);
char* JIT_OPERATION operationSwitchString(ExecState*, size_t tableIndex, JSString*);
-int32_t JIT_OPERATION operationSwitchStringAndGetBranchOffset(ExecState*, size_t tableIndex, JSString*);
-void JIT_OPERATION operationNotifyWrite(ExecState*, WatchpointSet*);
-void JIT_OPERATION operationThrowStackOverflowForVarargs(ExecState*) WTF_INTERNAL;
-int32_t JIT_OPERATION operationSizeOfVarargs(ExecState*, EncodedJSValue arguments, int32_t firstVarArgOffset);
-void JIT_OPERATION operationLoadVarargs(ExecState*, int32_t firstElementDest, EncodedJSValue arguments, int32_t offset, int32_t length, int32_t mandatoryMinimum);
+void JIT_OPERATION operationInvalidate(ExecState*, VariableWatchpointSet*);
-int64_t JIT_OPERATION operationConvertBoxedDoubleToInt52(EncodedJSValue);
-int64_t JIT_OPERATION operationConvertDoubleToInt52(double);
-
-void JIT_OPERATION operationProcessTypeProfilerLogDFG(ExecState*) WTF_INTERNAL;
+#if ENABLE(FTL_JIT)
+// FIXME: Make calls work well. Currently they're a pure regression.
+// https://bugs.webkit.org/show_bug.cgi?id=113621
+EncodedJSValue JIT_OPERATION operationFTLCall(ExecState*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationFTLConstruct(ExecState*) WTF_INTERNAL;
+#endif // ENABLE(FTL_JIT)
// These operations implement the implicitly called ToInt32 and ToBoolean conversions from ES5.
// This conversion returns an int32_t within a size_t such that the value is zero extended to fill the register.
@@ -144,11 +140,10 @@ size_t JIT_OPERATION dfgConvertJSValueToInt32(ExecState*, EncodedJSValue) WTF_IN
void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void*, void*) WTF_INTERNAL;
-void JIT_OPERATION triggerReoptimizationNow(CodeBlock*, OSRExitBase*) WTF_INTERNAL;
+void JIT_OPERATION triggerReoptimizationNow(CodeBlock*) WTF_INTERNAL;
#if ENABLE(FTL_JIT)
void JIT_OPERATION triggerTierUpNow(ExecState*) WTF_INTERNAL;
-void JIT_OPERATION triggerTierUpNowInLoop(ExecState*) WTF_INTERNAL;
char* JIT_OPERATION triggerOSREntryNow(ExecState*, int32_t bytecodeIndex, int32_t streamIndex) WTF_INTERNAL;
#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp b/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp
deleted file mode 100644
index b9e023fb6..000000000
--- a/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGPhantomInsertionPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "BytecodeLivenessAnalysisInlines.h"
-#include "DFGForAllKills.h"
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGMayExit.h"
-#include "DFGPhase.h"
-#include "DFGPredictionPropagationPhase.h"
-#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
-#include "OperandsInlines.h"
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-bool verbose = false;
-
-class PhantomInsertionPhase : public Phase {
-public:
- PhantomInsertionPhase(Graph& graph)
- : Phase(graph, "phantom insertion")
- , m_insertionSet(graph)
- , m_values(OperandsLike, graph.block(0)->variablesAtHead)
- {
- }
-
- bool run()
- {
- // We assume that DCE has already run. If we run before DCE then we think that all
- // SetLocals execute, which is inaccurate. That causes us to insert too few Phantoms.
- DFG_ASSERT(m_graph, nullptr, m_graph.m_refCountState == ExactRefCount);
-
- if (verbose) {
- dataLog("Graph before Phantom insertion:\n");
- m_graph.dump();
- }
-
- m_graph.clearEpochs();
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder())
- handleBlock(block);
-
- if (verbose) {
- dataLog("Graph after Phantom insertion:\n");
- m_graph.dump();
- }
-
- return true;
- }
-
-private:
- void handleBlock(BasicBlock* block)
- {
- // FIXME: For blocks that have low register pressure, it would make the most sense to
- // simply insert Phantoms at the last point possible since that would obviate the need to
- // query bytecode liveness:
- //
- // - If we MovHint @x into loc42 then put a Phantom on the last MovHinted value in loc42.
- // - At the end of the block put Phantoms for each MovHinted value.
- //
- // This will definitely not work if there are any phantom allocations. For those blocks
- // where this would be legal, it remains to be seen how profitable it would be even if there
- // was high register pressure. After all, a Phantom would cause a spill but it wouldn't
- // cause a fill.
- //
- // https://bugs.webkit.org/show_bug.cgi?id=144524
-
- m_values.fill(nullptr);
-
- Epoch currentEpoch = Epoch::first();
- unsigned lastExitingIndex = 0;
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- if (verbose)
- dataLog("Considering ", node, "\n");
-
- switch (node->op()) {
- case MovHint:
- m_values.operand(node->unlinkedLocal()) = node->child1().node();
- break;
-
- case ZombieHint:
- m_values.operand(node->unlinkedLocal()) = nullptr;
- break;
-
- case SetLocal:
- case GetLocal:
- case SetArgument:
- m_values.operand(node->local()) = nullptr;
- break;
-
- default:
- break;
- }
-
- if (mayExit(m_graph, node)) {
- currentEpoch.bump();
- lastExitingIndex = nodeIndex;
- }
-
- m_graph.doToChildren(
- node,
- [&] (Edge edge) {
- edge->setEpoch(currentEpoch);
- });
-
- node->setEpoch(currentEpoch);
-
- forAllKilledOperands(
- m_graph, node, block->tryAt(nodeIndex + 1),
- [&] (VirtualRegister reg) {
- if (verbose)
- dataLog(" Killed operand: ", reg, "\n");
-
- Node* killedNode = m_values.operand(reg);
- if (!killedNode)
- return;
-
- // We only need to insert a Phantom if the node hasn't been used since the last
- // exit, and was born before the last exit.
- if (killedNode->epoch() == currentEpoch)
- return;
-
- if (verbose) {
- dataLog(
- " Inserting Phantom on ", killedNode, " after ",
- block->at(lastExitingIndex), "\n");
- }
-
- // We have exact ref counts, so creating a new use means that we have to
- // increment the ref count.
- killedNode->postfixRef();
-
- m_insertionSet.insertNode(
- lastExitingIndex + 1, SpecNone, Phantom,
- block->at(lastExitingIndex)->origin, killedNode->defaultEdge());
- });
- }
-
- m_insertionSet.execute(block);
- }
-
- InsertionSet m_insertionSet;
- Operands<Node*> m_values;
-};
-
-} // anonymous namespace
-
-bool performPhantomInsertion(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Phantom Insertion Phase");
- return runPhase<PhantomInsertionPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.h b/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.h
deleted file mode 100644
index 902975b31..000000000
--- a/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGPhantomInsertionPhase_h
-#define DFGPhantomInsertionPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Inserts Phantoms based on bytecode liveness.
-
-bool performPhantomInsertion(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGPhantomInsertionPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGPhase.cpp b/Source/JavaScriptCore/dfg/DFGPhase.cpp
index c13c8c449..32e039ec5 100644
--- a/Source/JavaScriptCore/dfg/DFGPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPhase.cpp
@@ -29,21 +29,13 @@
#if ENABLE(DFG_JIT)
#include "DFGValidate.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
void Phase::beginPhase()
{
- if (Options::verboseValidationFailure()) {
- StringPrintStream out;
- m_graph.dump(out);
- m_graphDumpBeforePhase = out.toCString();
- }
-
if (!shouldDumpGraphAtEachPhase())
return;
-
dataLog("Beginning DFG phase ", m_name, ".\n");
dataLog("Before ", m_name, ":\n");
m_graph.dump();
@@ -53,7 +45,7 @@ void Phase::endPhase()
{
if (!Options::validateGraphAtEachPhase())
return;
- validate(m_graph, DumpGraph, m_graphDumpBeforePhase);
+ validate(m_graph, DumpGraph);
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGPhase.h b/Source/JavaScriptCore/dfg/DFGPhase.h
index 5e8b329fa..6de043bbd 100644
--- a/Source/JavaScriptCore/dfg/DFGPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGPhase_h
#define DFGPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -49,8 +51,6 @@ public:
const char* name() const { return m_name; }
- Graph& graph() { return m_graph; }
-
// Each phase must have a run() method.
protected:
@@ -67,15 +67,13 @@ private:
// Call these hooks when starting and finishing.
void beginPhase();
void endPhase();
-
- CString m_graphDumpBeforePhase;
};
template<typename PhaseType>
bool runAndLog(PhaseType& phase)
{
bool result = phase.run();
- if (result && logCompilationChanges(phase.graph().m_plan.mode))
+ if (result && logCompilationChanges())
dataLogF("Phase %s changed the IR.\n", phase.name());
return result;
}
diff --git a/Source/JavaScriptCore/dfg/DFGPhiChildren.h b/Source/JavaScriptCore/dfg/DFGPhiChildren.h
deleted file mode 100644
index 808512ed1..000000000
--- a/Source/JavaScriptCore/dfg/DFGPhiChildren.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGPhiChildren_h
-#define DFGPhiChildren_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGNode.h"
-#include <wtf/HashSet.h>
-#include <wtf/Vector.h>
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-class PhiChildren {
-public:
- typedef Vector<Node*, 3> List;
-
- PhiChildren();
- PhiChildren(Graph&);
- ~PhiChildren();
-
- // The list of Upsilons that point to the children of the Phi.
- const List& upsilonsOf(Node*) const;
-
- template<typename Functor>
- void forAllIncomingValues(Node* node, const Functor& functor)
- {
- for (Node* upsilon : upsilonsOf(node))
- functor(upsilon->child1().node());
- }
-
- // This walks the Phi graph.
- template<typename Functor>
- void forAllTransitiveIncomingValues(Node* node, const Functor& functor)
- {
- if (node->op() != Phi) {
- functor(node);
- return;
- }
- HashSet<Node*> seen;
- Vector<Node*> worklist;
- seen.add(node);
- worklist.append(node);
- while (!worklist.isEmpty()) {
- Node* currentNode = worklist.takeLast();
- forAllIncomingValues(
- currentNode,
- [&] (Node* incomingNode) {
- if (incomingNode->op() == Phi) {
- if (seen.add(incomingNode).isNewEntry)
- worklist.append(incomingNode);
- } else
- functor(incomingNode);
- });
- }
- }
-
-private:
- HashMap<Node*, List> m_children;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGPhiChildren_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGPlan.cpp b/Source/JavaScriptCore/dfg/DFGPlan.cpp
index b5696bc5d..735f5ffa2 100644
--- a/Source/JavaScriptCore/dfg/DFGPlan.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPlan.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,55 +28,43 @@
#if ENABLE(DFG_JIT)
-#include "DFGArgumentsEliminationPhase.h"
+#include "DFGArgumentsSimplificationPhase.h"
#include "DFGBackwardsPropagationPhase.h"
#include "DFGByteCodeParser.h"
#include "DFGCFAPhase.h"
#include "DFGCFGSimplificationPhase.h"
#include "DFGCPSRethreadingPhase.h"
#include "DFGCSEPhase.h"
-#include "DFGCleanUpPhase.h"
#include "DFGConstantFoldingPhase.h"
-#include "DFGConstantHoistingPhase.h"
#include "DFGCriticalEdgeBreakingPhase.h"
#include "DFGDCEPhase.h"
#include "DFGFailedFinalizer.h"
+#include "DFGFlushLivenessAnalysisPhase.h"
#include "DFGFixupPhase.h"
-#include "DFGGraphSafepoint.h"
-#include "DFGIntegerCheckCombiningPhase.h"
-#include "DFGIntegerRangeOptimizationPhase.h"
#include "DFGInvalidationPointInjectionPhase.h"
#include "DFGJITCompiler.h"
#include "DFGLICMPhase.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGLoopPreHeaderCreationPhase.h"
-#include "DFGMovHintRemovalPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGOSREntrypointCreationPhase.h"
-#include "DFGObjectAllocationSinkingPhase.h"
-#include "DFGPhantomInsertionPhase.h"
#include "DFGPredictionInjectionPhase.h"
#include "DFGPredictionPropagationPhase.h"
-#include "DFGPutStackSinkingPhase.h"
+#include "DFGResurrectionForValidationPhase.h"
#include "DFGSSAConversionPhase.h"
#include "DFGSSALoweringPhase.h"
#include "DFGStackLayoutPhase.h"
-#include "DFGStaticExecutionCountEstimationPhase.h"
-#include "DFGStoreBarrierInsertionPhase.h"
+#include "DFGStoreBarrierElisionPhase.h"
#include "DFGStrengthReductionPhase.h"
-#include "DFGStructureRegistrationPhase.h"
#include "DFGTierUpCheckInjectionPhase.h"
#include "DFGTypeCheckHoistingPhase.h"
#include "DFGUnificationPhase.h"
#include "DFGValidate.h"
-#include "DFGVarargsForwardingPhase.h"
#include "DFGVirtualRegisterAllocationPhase.h"
#include "DFGWatchpointCollectionPhase.h"
#include "Debugger.h"
-#include "JSCInlines.h"
#include "OperandsInlines.h"
-#include "ProfilerDatabase.h"
-#include "TrackedReferences.h"
+#include "Operations.h"
#include <wtf/CurrentTime.h>
#if ENABLE(FTL_JIT)
@@ -91,17 +79,10 @@
namespace JSC { namespace DFG {
-namespace {
-
-double totalDFGCompileTime;
-double totalFTLCompileTime;
-double totalFTLDFGCompileTime;
-double totalFTLLLVMCompileTime;
-
-void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
+static void dumpAndVerifyGraph(Graph& graph, const char* text)
{
GraphDumpMode modeForFinalValidate = DumpGraph;
- if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
+ if (verboseCompilationEnabled()) {
dataLog(text, "\n");
graph.dump();
modeForFinalValidate = DontDumpGraph;
@@ -110,40 +91,18 @@ void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
validate(graph, modeForFinalValidate);
}
-Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
-{
- switch (mode) {
- case InvalidCompilationMode:
- RELEASE_ASSERT_NOT_REACHED();
- return Profiler::DFG;
- case DFGMode:
- return Profiler::DFG;
- case FTLMode:
- return Profiler::FTL;
- case FTLForOSREntryMode:
- return Profiler::FTLForOSREntry;
- }
- RELEASE_ASSERT_NOT_REACHED();
- return Profiler::DFG;
-}
-
-} // anonymous namespace
-
-Plan::Plan(PassRefPtr<CodeBlock> passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
- CompilationMode mode, unsigned osrEntryBytecodeIndex,
- const Operands<JSValue>& mustHandleValues)
+Plan::Plan(
+ PassRefPtr<CodeBlock> passedCodeBlock, CompilationMode mode,
+ unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues)
: vm(*passedCodeBlock->vm())
, codeBlock(passedCodeBlock)
- , profiledDFGCodeBlock(profiledDFGCodeBlock)
, mode(mode)
, osrEntryBytecodeIndex(osrEntryBytecodeIndex)
, mustHandleValues(mustHandleValues)
- , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock.get()), profilerCompilationKindForMode(mode))) : 0)
- , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
+ , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock.get()), Profiler::DFG)) : 0)
, identifiers(codeBlock.get())
, weakReferences(codeBlock.get())
- , willTryToTierUp(false)
- , stage(Preparing)
+ , isCompiled(false)
{
}
@@ -151,54 +110,23 @@ Plan::~Plan()
{
}
-bool Plan::computeCompileTimes() const
-{
- return reportCompileTimes()
- || Options::reportTotalCompileTimes();
-}
-
-bool Plan::reportCompileTimes() const
-{
- return Options::reportCompileTimes()
- || (Options::reportFTLCompileTimes() && isFTL(mode));
-}
-
-void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
+void Plan::compileInThread(LongLivedState& longLivedState)
{
- this->threadData = threadData;
-
double before = 0;
- CString codeBlockName;
- if (computeCompileTimes())
- before = monotonicallyIncreasingTimeMS();
- if (reportCompileTimes())
- codeBlockName = toCString(*codeBlock);
+ if (Options::reportCompileTimes())
+ before = currentTimeMS();
SamplingRegion samplingRegion("DFG Compilation (Plan)");
CompilationScope compilationScope;
- if (logCompilationChanges(mode))
+ if (logCompilationChanges())
dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");
CompilationPath path = compileInThreadImpl(longLivedState);
- RELEASE_ASSERT(path == CancelPath || finalizer);
- RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));
-
- double after = 0;
- if (computeCompileTimes())
- after = monotonicallyIncreasingTimeMS();
+ RELEASE_ASSERT(finalizer);
- if (Options::reportTotalCompileTimes()) {
- if (isFTL(mode)) {
- totalFTLCompileTime += after - before;
- totalFTLDFGCompileTime += m_timeBeforeFTL - before;
- totalFTLLLVMCompileTime += after - m_timeBeforeFTL;
- } else
- totalDFGCompileTime += after - before;
- }
-
- if (reportCompileTimes()) {
+ if (Options::reportCompileTimes()) {
const char* pathName;
switch (path) {
case FailPath:
@@ -210,26 +138,22 @@ void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadDat
case FTLPath:
pathName = "FTL";
break;
- case CancelPath:
- pathName = "Cancelled";
- break;
default:
RELEASE_ASSERT_NOT_REACHED();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
pathName = "";
-#endif
break;
}
- dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
+ double now = currentTimeMS();
+ dataLog("Optimized ", *codeBlock->alternative(), " using ", mode, " with ", pathName, " in ", now - before, " ms");
if (path == FTLPath)
- dataLog(" (DFG: ", m_timeBeforeFTL - before, ", LLVM: ", after - m_timeBeforeFTL, ")");
+ dataLog(" (DFG: ", beforeFTL - before, ", LLVM: ", now - beforeFTL, ")");
dataLog(".\n");
}
}
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
- if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
+ if (verboseCompilationEnabled() && osrEntryBytecodeIndex != UINT_MAX) {
dataLog("\n");
dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
dataLog("\n");
@@ -238,7 +162,7 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
Graph dfg(vm, *this, longLivedState);
if (!parse(dfg)) {
- finalizer = std::make_unique<FailedFinalizer>(*this);
+ finalizer = adoptPtr(new FailedFinalizer(*this));
return FailPath;
}
@@ -251,21 +175,14 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
if (validationEnabled())
validate(dfg);
- if (Options::dumpGraphAfterParsing()) {
- dataLog("Graph after parsing:\n");
- dfg.dump();
- }
-
performCPSRethreading(dfg);
performUnification(dfg);
performPredictionInjection(dfg);
- performStaticExecutionCountEstimation(dfg);
-
if (mode == FTLForOSREntryMode) {
bool result = performOSREntrypointCreation(dfg);
if (!result) {
- finalizer = std::make_unique<FailedFinalizer>(*this);
+ finalizer = adoptPtr(new FailedFinalizer(*this));
return FailPath;
}
performCPSRethreading(dfg);
@@ -277,89 +194,66 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
performBackwardsPropagation(dfg);
performPredictionPropagation(dfg);
performFixup(dfg);
- performStructureRegistration(dfg);
performInvalidationPointInjection(dfg);
performTypeCheckHoisting(dfg);
+ unsigned count = 1;
dfg.m_fixpointState = FixpointNotConverged;
-
- // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
- // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
- // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
- // that the compiler compiles more quickly. We want the third tier to compile quickly, which
- // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
- if (validationEnabled())
- validate(dfg);
+ for (;; ++count) {
+ if (logCompilationChanges())
+ dataLogF("DFG beginning optimization fixpoint iteration #%u.\n", count);
+ bool changed = false;
- performStrengthReduction(dfg);
- performLocalCSE(dfg);
- performCPSRethreading(dfg);
- performCFA(dfg);
- performConstantFolding(dfg);
- bool changed = false;
- changed |= performCFGSimplification(dfg);
- changed |= performLocalCSE(dfg);
-
- if (validationEnabled())
- validate(dfg);
-
- performCPSRethreading(dfg);
- if (!isFTL(mode)) {
- // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
- // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
- // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
- // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
- // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
- // escape for all of the arguments. This then disables object allocation sinking.
- //
- // So, for now, we just disable this phase for the FTL.
- //
- // If we wanted to enable it, we'd have to do any of the following:
- // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
- // PutStack sinking and object allocation sinking.
- // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
- // GetStack+PutStack.
- //
- // But, it's not super valuable to enable those optimizations, since the FTL
- // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
- // pathology.
+ if (validationEnabled())
+ validate(dfg);
- changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
- }
- if (changed) {
+ changed |= performStrengthReduction(dfg);
performCFA(dfg);
- performConstantFolding(dfg);
+ changed |= performConstantFolding(dfg);
+ changed |= performArgumentsSimplification(dfg);
+ changed |= performCFGSimplification(dfg);
+ changed |= performCSE(dfg);
+
+ if (!changed)
+ break;
+
+ performCPSRethreading(dfg);
}
+ if (logCompilationChanges())
+ dataLogF("DFG optimization fixpoint converged in %u iterations.\n", count);
+
+ dfg.m_fixpointState = FixpointConverged;
+
+ performStoreBarrierElision(dfg);
+ performStoreElimination(dfg);
+
// If we're doing validation, then run some analyses, to give them an opportunity
// to self-validate. Now is as good a time as any to do this.
if (validationEnabled()) {
dfg.m_dominators.computeIfNecessary(dfg);
dfg.m_naturalLoops.computeIfNecessary(dfg);
- dfg.m_prePostNumbering.computeIfNecessary(dfg);
}
switch (mode) {
case DFGMode: {
- dfg.m_fixpointState = FixpointConverged;
-
performTierUpCheckInjection(dfg);
- performFastStoreBarrierInsertion(dfg);
- performCleanUp(dfg);
performCPSRethreading(dfg);
performDCE(dfg);
- performPhantomInsertion(dfg);
performStackLayout(dfg);
performVirtualRegisterAllocation(dfg);
performWatchpointCollection(dfg);
dumpAndVerifyGraph(dfg, "Graph after optimization:");
JITCompiler dataFlowJIT(dfg);
- if (codeBlock->codeType() == FunctionCode)
+ if (codeBlock->codeType() == FunctionCode) {
dataFlowJIT.compileFunction();
- else
+ dataFlowJIT.linkFunction();
+ } else {
dataFlowJIT.compile();
+ dataFlowJIT.link();
+ }
return DFGPath;
}
@@ -368,132 +262,53 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
if (FTL::canCompile(dfg) == FTL::CannotCompile) {
- finalizer = std::make_unique<FailedFinalizer>(*this);
+ finalizer = adoptPtr(new FailedFinalizer(*this));
return FailPath;
}
- performCleanUp(dfg); // Reduce the graph size a bit.
performCriticalEdgeBreaking(dfg);
performLoopPreHeaderCreation(dfg);
performCPSRethreading(dfg);
performSSAConversion(dfg);
performSSALowering(dfg);
-
- // Ideally, these would be run to fixpoint with the object allocation sinking phase.
- performArgumentsElimination(dfg);
- performPutStackSinking(dfg);
-
- performConstantHoisting(dfg);
- performGlobalCSE(dfg);
- performLivenessAnalysis(dfg);
- performIntegerRangeOptimization(dfg);
performLivenessAnalysis(dfg);
performCFA(dfg);
- performConstantFolding(dfg);
- performCleanUp(dfg); // Reduce the graph size a lot.
- changed = false;
- changed |= performStrengthReduction(dfg);
- if (Options::enableObjectAllocationSinking()) {
- changed |= performCriticalEdgeBreaking(dfg);
- changed |= performObjectAllocationSinking(dfg);
- }
- if (changed) {
- // State-at-tail and state-at-head will be invalid if we did strength reduction since
- // it might increase live ranges.
- performLivenessAnalysis(dfg);
- performCFA(dfg);
- performConstantFolding(dfg);
- }
-
- // Currently, this relies on pre-headers still being valid. That precludes running CFG
- // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
- // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
- // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
- // then we'd need to do some simple SSA fix-up.
performLICM(dfg);
-
- performCleanUp(dfg);
- performIntegerCheckCombining(dfg);
- performGlobalCSE(dfg);
-
- // At this point we're not allowed to do any further code motion because our reasoning
- // about code motion assumes that it's OK to insert GC points in random places.
- dfg.m_fixpointState = FixpointConverged;
-
+ performCSE(dfg);
performLivenessAnalysis(dfg);
performCFA(dfg);
- performGlobalStoreBarrierInsertion(dfg);
- if (Options::enableMovHintRemoval())
- performMovHintRemoval(dfg);
- performCleanUp(dfg);
- performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by LLVM.
+ if (Options::validateFTLOSRExitLiveness())
+ performResurrectionForValidation(dfg);
+ performDCE(dfg); // We rely on this to convert dead SetLocals into the appropriate hint, and to kill dead code that won't be recognized as dead by LLVM.
performStackLayout(dfg);
performLivenessAnalysis(dfg);
+ performFlushLivenessAnalysis(dfg);
performOSRAvailabilityAnalysis(dfg);
performWatchpointCollection(dfg);
- if (FTL::canCompile(dfg) == FTL::CannotCompile) {
- finalizer = std::make_unique<FailedFinalizer>(*this);
- return FailPath;
- }
-
- dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldShowDisassembly(mode));
+ dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:");
- bool haveLLVM;
- Safepoint::Result safepointResult;
- {
- GraphSafepoint safepoint(dfg, safepointResult);
- haveLLVM = initializeLLVM();
- }
- if (safepointResult.didGetCancelled())
- return CancelPath;
+ initializeLLVM();
- if (!haveLLVM) {
- if (Options::ftlCrashesIfCantInitializeLLVM()) {
- dataLog("LLVM can't be initialized.\n");
- CRASH();
- }
- finalizer = std::make_unique<FailedFinalizer>(*this);
- return FailPath;
- }
-
FTL::State state(dfg);
FTL::lowerDFGToLLVM(state);
- if (computeCompileTimes())
- m_timeBeforeFTL = monotonicallyIncreasingTimeMS();
+ if (Options::reportCompileTimes())
+ beforeFTL = currentTimeMS();
if (Options::llvmAlwaysFailsBeforeCompile()) {
FTL::fail(state);
return FTLPath;
}
- FTL::compile(state, safepointResult);
- if (safepointResult.didGetCancelled())
- return CancelPath;
-
+ FTL::compile(state);
+
if (Options::llvmAlwaysFailsBeforeLink()) {
FTL::fail(state);
return FTLPath;
}
- if (state.allocationFailed) {
- FTL::fail(state);
- return FTLPath;
- }
-
- if (state.jitCode->stackmaps.stackSize() > Options::llvmMaxStackSize()) {
- FTL::fail(state);
- return FTLPath;
- }
-
FTL::link(state);
-
- if (state.allocationFailed) {
- FTL::fail(state);
- return FTLPath;
- }
-
return FTLPath;
#else
RELEASE_ASSERT_NOT_REACHED();
@@ -509,18 +324,8 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
bool Plan::isStillValid()
{
- CodeBlock* replacement = codeBlock->replacement();
- if (!replacement)
- return false;
- // FIXME: This is almost certainly not necessary. There's no way for the baseline
- // code to be replaced during a compilation, except if we delete the plan, in which
- // case we wouldn't be here.
- // https://bugs.webkit.org/show_bug.cgi?id=132707
- if (codeBlock->alternative() != replacement->baselineVersion())
- return false;
- if (!watchpoints.areStillValid())
- return false;
- return true;
+ return watchpoints.areStillValid()
+ && chains.areStillValid();
}
void Plan::reallyAdd(CommonData* commonData)
@@ -529,32 +334,27 @@ void Plan::reallyAdd(CommonData* commonData)
identifiers.reallyAdd(vm, commonData);
weakReferences.reallyAdd(vm, commonData);
transitions.reallyAdd(vm, commonData);
-}
-
-void Plan::notifyCompiling()
-{
- stage = Compiling;
-}
-
-void Plan::notifyCompiled()
-{
- stage = Compiled;
+ writeBarriers.trigger(vm);
}
void Plan::notifyReady()
{
callback->compilationDidBecomeReadyAsynchronously(codeBlock.get());
- stage = Ready;
+ isCompiled = true;
}
CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
- // We will establish new references from the code block to things. So, we need a barrier.
- vm.heap.writeBarrier(codeBlock->ownerExecutable());
-
if (!isStillValid())
return CompilationInvalidated;
+ if (vm.enabledProfiler())
+ return CompilationInvalidated;
+
+ Debugger* debugger = codeBlock->globalObject()->debugger();
+ if (debugger && (debugger->isStepping() || codeBlock->baselineAlternative()->hasDebuggerRequests()))
+ return CompilationInvalidated;
+
bool result;
if (codeBlock->codeType() == FunctionCode)
result = finalizer->finalizeFunction();
@@ -566,21 +366,6 @@ CompilationResult Plan::finalizeWithoutNotifyingCallback()
reallyAdd(codeBlock->jitCode()->dfgCommon());
- if (validationEnabled()) {
- TrackedReferences trackedReferences;
-
- for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
- trackedReferences.add(reference.get());
- for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
- trackedReferences.add(reference.get());
- for (WriteBarrier<Unknown>& constant : codeBlock->constants())
- trackedReferences.add(constant.get());
-
- // Check that any other references that we have anywhere in the JITCode are also
- // tracked either strongly or weakly.
- codeBlock->jitCode()->validateReferences(trackedReferences);
- }
-
return CompilationSuccessful;
}
@@ -594,64 +379,6 @@ CompilationKey Plan::key()
return CompilationKey(codeBlock->alternative(), mode);
}
-void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor, CodeBlockSet& codeBlocks)
-{
- if (!isKnownToBeLiveDuringGC())
- return;
-
- for (unsigned i = mustHandleValues.size(); i--;)
- visitor.appendUnbarrieredValue(&mustHandleValues[i]);
-
- codeBlocks.mark(codeBlock->alternative());
- codeBlocks.mark(codeBlock.get());
- codeBlocks.mark(profiledDFGCodeBlock.get());
-
- weakReferences.visitChildren(visitor);
- transitions.visitChildren(visitor);
-}
-
-bool Plan::isKnownToBeLiveDuringGC()
-{
- if (stage == Cancelled)
- return false;
- if (!Heap::isMarked(codeBlock->ownerExecutable()))
- return false;
- if (!codeBlock->alternative()->isKnownToBeLiveDuringGC())
- return false;
- if (!!profiledDFGCodeBlock && !profiledDFGCodeBlock->isKnownToBeLiveDuringGC())
- return false;
- return true;
-}
-
-void Plan::cancel()
-{
- codeBlock = nullptr;
- profiledDFGCodeBlock = nullptr;
- mustHandleValues.clear();
- compilation = nullptr;
- finalizer = nullptr;
- inlineCallFrames = nullptr;
- watchpoints = DesiredWatchpoints();
- identifiers = DesiredIdentifiers();
- weakReferences = DesiredWeakReferences();
- transitions = DesiredTransitions();
- callback = nullptr;
- stage = Cancelled;
-}
-
-HashMap<CString, double> Plan::compileTimeStats()
-{
- HashMap<CString, double> result;
- if (Options::reportTotalCompileTimes()) {
- result.add("Compile Time", totalDFGCompileTime + totalFTLCompileTime);
- result.add("DFG Compile Time", totalDFGCompileTime);
- result.add("FTL Compile Time", totalFTLCompileTime);
- result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
- result.add("FTL (LLVM) Compile Time", totalFTLLLVMCompileTime);
- }
- return result;
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGPlan.h b/Source/JavaScriptCore/dfg/DFGPlan.h
index 9ed7b2069..a60269798 100644
--- a/Source/JavaScriptCore/dfg/DFGPlan.h
+++ b/Source/JavaScriptCore/dfg/DFGPlan.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,103 +26,83 @@
#ifndef DFGPlan_h
#define DFGPlan_h
+#include <wtf/Platform.h>
+
#include "CompilationResult.h"
#include "DFGCompilationKey.h"
#include "DFGCompilationMode.h"
#include "DFGDesiredIdentifiers.h"
+#include "DFGDesiredStructureChains.h"
#include "DFGDesiredTransitions.h"
#include "DFGDesiredWatchpoints.h"
#include "DFGDesiredWeakReferences.h"
+#include "DFGDesiredWriteBarriers.h"
#include "DFGFinalizer.h"
#include "DeferredCompilationCallback.h"
#include "Operands.h"
#include "ProfilerCompilation.h"
-#include <wtf/HashMap.h>
#include <wtf/ThreadSafeRefCounted.h>
-#include <wtf/text/CString.h>
namespace JSC {
class CodeBlock;
-class CodeBlockSet;
-class SlotVisitor;
namespace DFG {
class LongLivedState;
-class ThreadData;
#if ENABLE(DFG_JIT)
struct Plan : public ThreadSafeRefCounted<Plan> {
Plan(
- PassRefPtr<CodeBlock> codeBlockToCompile, CodeBlock* profiledDFGCodeBlock,
- CompilationMode, unsigned osrEntryBytecodeIndex,
+ PassRefPtr<CodeBlock>, CompilationMode, unsigned osrEntryBytecodeIndex,
const Operands<JSValue>& mustHandleValues);
~Plan();
-
- void compileInThread(LongLivedState&, ThreadData*);
+
+ void compileInThread(LongLivedState&);
CompilationResult finalizeWithoutNotifyingCallback();
void finalizeAndNotifyCallback();
- void notifyCompiling();
- void notifyCompiled();
void notifyReady();
CompilationKey key();
- void checkLivenessAndVisitChildren(SlotVisitor&, CodeBlockSet&);
- bool isKnownToBeLiveDuringGC();
- void cancel();
-
VM& vm;
RefPtr<CodeBlock> codeBlock;
- RefPtr<CodeBlock> profiledDFGCodeBlock;
CompilationMode mode;
const unsigned osrEntryBytecodeIndex;
Operands<JSValue> mustHandleValues;
-
- ThreadData* threadData;
RefPtr<Profiler::Compilation> compilation;
- std::unique_ptr<Finalizer> finalizer;
+ OwnPtr<Finalizer> finalizer;
- RefPtr<InlineCallFrameSet> inlineCallFrames;
DesiredWatchpoints watchpoints;
DesiredIdentifiers identifiers;
+ DesiredStructureChains chains;
DesiredWeakReferences weakReferences;
+ DesiredWriteBarriers writeBarriers;
DesiredTransitions transitions;
-
- bool willTryToTierUp;
- enum Stage { Preparing, Compiling, Compiled, Ready, Cancelled };
- Stage stage;
+ double beforeFTL;
+
+ bool isCompiled;
RefPtr<DeferredCompilationCallback> callback;
- JS_EXPORT_PRIVATE static HashMap<CString, double> compileTimeStats();
-
private:
- bool computeCompileTimes() const;
- bool reportCompileTimes() const;
-
- enum CompilationPath { FailPath, DFGPath, FTLPath, CancelPath };
+ enum CompilationPath { FailPath, DFGPath, FTLPath };
CompilationPath compileInThreadImpl(LongLivedState&);
bool isStillValid();
void reallyAdd(CommonData*);
-
- double m_timeBeforeFTL;
};
#else // ENABLE(DFG_JIT)
class Plan : public RefCounted<Plan> {
// Dummy class to allow !ENABLE(DFG_JIT) to build.
-public:
- static HashMap<CString, double> compileTimeStats() { return HashMap<CString, double>(); }
};
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGPrePostNumbering.cpp b/Source/JavaScriptCore/dfg/DFGPrePostNumbering.cpp
deleted file mode 100644
index 166ff4536..000000000
--- a/Source/JavaScriptCore/dfg/DFGPrePostNumbering.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGPrePostNumbering.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBlockMapInlines.h"
-#include "DFGBlockWorklist.h"
-#include "DFGGraph.h"
-
-namespace JSC { namespace DFG {
-
-PrePostNumbering::PrePostNumbering() { }
-PrePostNumbering::~PrePostNumbering() { }
-
-void PrePostNumbering::compute(Graph& graph)
-{
- m_map = BlockMap<Numbering>(graph);
-
- PostOrderBlockWorklist worklist;
- worklist.push(graph.block(0));
- unsigned nextPreNumber = 0;
- unsigned nextPostNumber = 0;
- while (BlockWithOrder item = worklist.pop()) {
- switch (item.order) {
- case PreOrder:
- m_map[item.block].m_preNumber = nextPreNumber++;
- worklist.pushPost(item.block);
- for (BasicBlock* successor : item.block->successors())
- worklist.push(successor);
- break;
- case PostOrder:
- m_map[item.block].m_postNumber = nextPostNumber++;
- break;
- }
- }
-}
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-using namespace JSC::DFG;
-
-void printInternal(PrintStream& out, EdgeKind kind)
-{
- switch (kind) {
- case ForwardEdge:
- out.print("ForwardEdge");
- return;
- case CrossEdge:
- out.print("CrossEdge");
- return;
- case BackEdge:
- out.print("BackEdge");
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGPrePostNumbering.h b/Source/JavaScriptCore/dfg/DFGPrePostNumbering.h
deleted file mode 100644
index 286bf62af..000000000
--- a/Source/JavaScriptCore/dfg/DFGPrePostNumbering.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGPrePostNumbering_h
-#define DFGPrePostNumbering_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGAnalysis.h"
-#include "DFGBasicBlock.h"
-#include "DFGBlockMap.h"
-
-namespace JSC { namespace DFG {
-
-enum EdgeKind {
- ForwardEdge,
- CrossEdge,
- BackEdge
-};
-
-class PrePostNumbering : public Analysis<PrePostNumbering> {
-public:
- PrePostNumbering();
- ~PrePostNumbering();
-
- void compute(Graph&);
-
- unsigned preNumber(BasicBlock* block) const { return m_map[block].m_preNumber; }
- unsigned postNumber(BasicBlock* block) const { return m_map[block].m_postNumber; }
-
- // Is from a strict ancestor of to?
- bool isStrictAncestorOf(BasicBlock* from, BasicBlock* to) const
- {
- return preNumber(from) < preNumber(to)
- && postNumber(from) > postNumber(to);
- }
-
- bool isAncestorOf(BasicBlock* from, BasicBlock* to) const
- {
- return from == to || isStrictAncestorOf(from, to);
- }
-
- bool isStrictDescendantOf(BasicBlock* from, BasicBlock* to) const
- {
- return isStrictAncestorOf(to, from);
- }
-
- bool isDescendantOf(BasicBlock* from, BasicBlock* to) const
- {
- return isAncestorOf(to, from);
- }
-
- // This will give a bogus answer if there is actually no such edge. If you want to determine
- // if there is any such edge, you have to do it yourself.
- EdgeKind edgeKind(BasicBlock* from, BasicBlock* to) const
- {
- if (isStrictDescendantOf(to, from))
- return ForwardEdge;
-
- if (isAncestorOf(to, from))
- return BackEdge;
-
- return CrossEdge;
- }
-
-private:
- struct Numbering {
- unsigned m_preNumber;
- unsigned m_postNumber;
- };
-
- BlockMap<Numbering> m_map;
-};
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-void printInternal(PrintStream&, JSC::DFG::EdgeKind);
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGPrePostNumbering_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h b/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h
deleted file mode 100644
index 1cd6b09ef..000000000
--- a/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGPreciseLocalClobberize_h
-#define DFGPreciseLocalClobberize_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGClobberize.h"
-#include "DFGMayExit.h"
-
-namespace JSC { namespace DFG {
-
-template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
-class PreciseLocalClobberizeAdaptor {
-public:
- PreciseLocalClobberizeAdaptor(
- Graph& graph, Node* node,
- const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
- : m_graph(graph)
- , m_node(node)
- , m_read(read)
- , m_write(write)
- , m_def(def)
- {
- }
-
- void read(AbstractHeap heap)
- {
- if (heap.kind() == Stack) {
- if (heap.payload().isTop()) {
- readTop();
- return;
- }
-
- callIfAppropriate(m_read, VirtualRegister(heap.payload().value32()));
- return;
- }
-
- if (heap.overlaps(Stack)) {
- readTop();
- return;
- }
- }
-
- void write(AbstractHeap heap)
- {
- // We expect stack writes to already be precisely characterized by DFG::clobberize().
- if (heap.kind() == Stack) {
- RELEASE_ASSERT(!heap.payload().isTop());
- callIfAppropriate(m_write, VirtualRegister(heap.payload().value32()));
- return;
- }
-
- RELEASE_ASSERT(!heap.overlaps(Stack));
- }
-
- void def(PureValue)
- {
- // PureValue defs never have anything to do with locals, so ignore this.
- }
-
- void def(HeapLocation location, LazyNode node)
- {
- if (location.kind() != StackLoc)
- return;
-
- RELEASE_ASSERT(location.heap().kind() == Stack);
-
- m_def(VirtualRegister(location.heap().payload().value32()), node);
- }
-
-private:
- template<typename Functor>
- void callIfAppropriate(const Functor& functor, VirtualRegister operand)
- {
- if (operand.isLocal() && static_cast<unsigned>(operand.toLocal()) >= m_graph.block(0)->variablesAtHead.numberOfLocals())
- return;
-
- if (operand.isArgument() && !operand.isHeader() && static_cast<unsigned>(operand.toArgument()) >= m_graph.block(0)->variablesAtHead.numberOfArguments())
- return;
-
- functor(operand);
- }
-
- void readTop()
- {
- switch (m_node->op()) {
- case GetMyArgumentByVal:
- case ForwardVarargs:
- case CallForwardVarargs:
- case ConstructForwardVarargs: {
- InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
- if (!inlineCallFrame) {
- // Read the outermost arguments and argument count.
- for (unsigned i = m_graph.m_codeBlock->numParameters(); i-- > 1;)
- m_read(virtualRegisterForArgument(i));
- m_read(VirtualRegister(JSStack::ArgumentCount));
- break;
- }
-
- for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;)
- m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
- if (inlineCallFrame->isVarargs())
- m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
- break;
- }
-
- default: {
- // All of the outermost arguments, except this, are definitely read.
- for (unsigned i = m_graph.m_codeBlock->numParameters(); i-- > 1;)
- m_read(virtualRegisterForArgument(i));
-
- // The stack header is read.
- for (unsigned i = 0; i < JSStack::ThisArgument; ++i)
- m_read(VirtualRegister(i));
-
- // Read all of the inline arguments and call frame headers that we didn't already capture.
- for (InlineCallFrame* inlineCallFrame = m_node->origin.semantic.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
- for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;)
- m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
- if (inlineCallFrame->isClosureCall)
- m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::Callee));
- if (inlineCallFrame->isVarargs())
- m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
- }
- break;
- } }
- }
-
- Graph& m_graph;
- Node* m_node;
- const ReadFunctor& m_read;
- const WriteFunctor& m_write;
- const DefFunctor& m_def;
-};
-
-template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
-void preciseLocalClobberize(
- Graph& graph, Node* node,
- const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
-{
- PreciseLocalClobberizeAdaptor<ReadFunctor, WriteFunctor, DefFunctor>
- adaptor(graph, node, read, write, def);
- clobberize(graph, node, adaptor);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGPreciseLocalClobberize_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp
index d9a39f90f..21da5fe0a 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp
@@ -31,7 +31,7 @@
#include "DFGBasicBlockInlines.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h
index 232f8bfe5..00f04a3d3 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGPredictionInjectionPhase_h
#define DFGPredictionInjectionPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
index 36f2df7c7..d859849a3 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +30,7 @@
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -56,31 +56,9 @@ public:
{
ASSERT(m_graph.m_form == ThreadedCPS);
ASSERT(m_graph.m_unificationState == GloballyUnified);
-
- propagateThroughArgumentPositions();
-
- m_pass = PrimaryPass;
- propagateToFixpoint();
-
- m_pass = RareCasePass;
- propagateToFixpoint();
- m_pass = DoubleVotingPass;
- do {
- m_changed = false;
- doRoundOfDoubleVoting();
- if (!m_changed)
- break;
- m_changed = false;
- propagateForward();
- } while (m_changed);
-
- return true;
- }
-
-private:
- void propagateToFixpoint()
- {
+ // 1) propagate predictions
+
do {
m_changed = false;
@@ -97,8 +75,22 @@ private:
m_changed = false;
propagateBackward();
} while (m_changed);
+
+ // 2) repropagate predictions while doing double voting.
+
+ do {
+ m_changed = false;
+ doRoundOfDoubleVoting();
+ if (!m_changed)
+ break;
+ m_changed = false;
+ propagateForward();
+ } while (m_changed);
+
+ return true;
}
+private:
bool setPrediction(SpeculatedType prediction)
{
ASSERT(m_currentNode->hasResult());
@@ -121,14 +113,11 @@ private:
SpeculatedType speculatedDoubleTypeForPrediction(SpeculatedType value)
{
- SpeculatedType result = SpecDoubleReal;
- if (value & SpecDoubleImpureNaN)
- result |= SpecDoubleImpureNaN;
- if (value & SpecDoublePureNaN)
- result |= SpecDoublePureNaN;
- if (!isFullNumberOrBooleanSpeculation(value))
- result |= SpecDoublePureNaN;
- return result;
+ if (!isFullNumberSpeculation(value))
+ return SpecDouble;
+ if (value & SpecDoubleNaN)
+ return SpecDouble;
+ return SpecDoubleReal;
}
SpeculatedType speculatedDoubleTypeForPredictions(SpeculatedType left, SpeculatedType right)
@@ -143,23 +132,19 @@ private:
bool changed = false;
switch (op) {
- case JSConstant: {
- SpeculatedType type = speculationFromValue(node->asJSValue());
- if (type == SpecInt52AsDouble && enableInt52())
+ case JSConstant:
+ case WeakJSConstant: {
+ SpeculatedType type = speculationFromValue(m_graph.valueOfJSConstant(node));
+ if (type == SpecInt52AsDouble)
type = SpecInt52;
changed |= setPrediction(type);
break;
}
- case DoubleConstant: {
- SpeculatedType type = speculationFromValue(node->asJSValue());
- changed |= setPrediction(type);
- break;
- }
case GetLocal: {
VariableAccessData* variable = node->variableAccessData();
SpeculatedType prediction = variable->prediction();
- if (!variable->couldRepresentInt52() && (prediction & SpecInt52))
+ if (variable->shouldNeverUnbox() && (prediction & SpecInt52))
prediction = (prediction | SpecInt52AsDouble) & ~SpecInt52;
if (prediction)
changed |= mergePrediction(prediction);
@@ -178,8 +163,7 @@ private:
case BitRShift:
case BitLShift:
case BitURShift:
- case ArithIMul:
- case ArithClz32: {
+ case ArithIMul: {
changed |= setPrediction(SpecInt32);
break;
}
@@ -190,40 +174,15 @@ private:
case RegExpTest:
case GetById:
case GetByIdFlush:
+ case GetMyArgumentByValSafe:
case GetByOffset:
- case MultiGetByOffset:
- case GetDirectPname:
case Call:
case Construct:
- case CallVarargs:
- case ConstructVarargs:
- case CallForwardVarargs:
- case ConstructForwardVarargs:
case GetGlobalVar:
- case GetClosureVar:
- case GetFromArguments: {
+ case GetClosureVar: {
changed |= setPrediction(node->getHeapPrediction());
break;
}
-
- case GetGetterSetterByOffset:
- case GetExecutable: {
- changed |= setPrediction(SpecCellOther);
- break;
- }
-
- case GetGetter:
- case GetSetter:
- case GetCallee:
- case NewFunction: {
- changed |= setPrediction(SpecFunction);
- break;
- }
-
- case GetArgumentCount: {
- changed |= setPrediction(SpecInt32);
- break;
- }
case StringCharCodeAt: {
changed |= setPrediction(SpecInt32);
@@ -233,7 +192,7 @@ private:
case UInt32ToNumber: {
// FIXME: Support Int52.
// https://bugs.webkit.org/show_bug.cgi?id=125704
- if (node->canSpeculateInt32(m_pass))
+ if (nodeCanSpeculateInt32(node->arithNodeFlags()))
changed |= mergePrediction(SpecInt32);
else
changed |= mergePrediction(SpecBytecodeNumber);
@@ -245,32 +204,43 @@ private:
SpeculatedType right = node->child2()->prediction();
if (left && right) {
- if (isFullNumberOrBooleanSpeculationExpectingDefined(left)
- && isFullNumberOrBooleanSpeculationExpectingDefined(right)) {
- if (m_graph.addSpeculationMode(node, m_pass) != DontSpeculateInt32)
+ if (isFullNumberSpeculationExpectingDefined(left) && isFullNumberSpeculationExpectingDefined(right)) {
+ if (m_graph.addSpeculationMode(node) != DontSpeculateInt32)
changed |= mergePrediction(SpecInt32);
else if (m_graph.addShouldSpeculateMachineInt(node))
changed |= mergePrediction(SpecInt52);
else
changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
- } else if (
- !(left & (SpecFullNumber | SpecBoolean))
- || !(right & (SpecFullNumber | SpecBoolean))) {
+ } else if (!(left & SpecFullNumber) || !(right & SpecFullNumber)) {
// left or right is definitely something other than a number.
changed |= mergePrediction(SpecString);
} else
- changed |= mergePrediction(SpecString | SpecInt32 | SpecBytecodeDouble);
+ changed |= mergePrediction(SpecString | SpecInt32 | SpecDouble);
}
break;
}
-
- case ArithAdd:
+
+ case ArithAdd: {
+ SpeculatedType left = node->child1()->prediction();
+ SpeculatedType right = node->child2()->prediction();
+
+ if (left && right) {
+ if (m_graph.addSpeculationMode(node) != DontSpeculateInt32)
+ changed |= mergePrediction(SpecInt32);
+ else if (m_graph.addShouldSpeculateMachineInt(node))
+ changed |= mergePrediction(SpecInt52);
+ else
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
+ }
+ break;
+ }
+
case ArithSub: {
SpeculatedType left = node->child1()->prediction();
SpeculatedType right = node->child2()->prediction();
if (left && right) {
- if (m_graph.addSpeculationMode(node, m_pass) != DontSpeculateInt32)
+ if (m_graph.addSpeculationMode(node) != DontSpeculateInt32)
changed |= mergePrediction(SpecInt32);
else if (m_graph.addShouldSpeculateMachineInt(node))
changed |= mergePrediction(SpecInt52);
@@ -282,9 +252,9 @@ private:
case ArithNegate:
if (node->child1()->prediction()) {
- if (m_graph.negateShouldSpeculateInt32(node, m_pass))
+ if (m_graph.negateShouldSpeculateInt32(node))
changed |= mergePrediction(SpecInt32);
- else if (m_graph.negateShouldSpeculateMachineInt(node, m_pass))
+ else if (m_graph.negateShouldSpeculateMachineInt(node))
changed |= mergePrediction(SpecInt52);
else
changed |= mergePrediction(speculatedDoubleTypeForPrediction(node->child1()->prediction()));
@@ -297,8 +267,8 @@ private:
SpeculatedType right = node->child2()->prediction();
if (left && right) {
- if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node->child1().node(), node->child2().node())
- && node->canSpeculateInt32(m_pass))
+ if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node())
+ && nodeCanSpeculateInt32(node->arithNodeFlags()))
changed |= mergePrediction(SpecInt32);
else
changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
@@ -311,53 +281,55 @@ private:
SpeculatedType right = node->child2()->prediction();
if (left && right) {
- if (m_graph.mulShouldSpeculateInt32(node, m_pass))
+ if (m_graph.mulShouldSpeculateInt32(node))
changed |= mergePrediction(SpecInt32);
- else if (m_graph.mulShouldSpeculateMachineInt(node, m_pass))
+ else if (m_graph.mulShouldSpeculateMachineInt(node))
changed |= mergePrediction(SpecInt52);
else
changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
break;
}
-
- case ArithDiv:
+
+ case ArithDiv: {
+ SpeculatedType left = node->child1()->prediction();
+ SpeculatedType right = node->child2()->prediction();
+
+ if (left && right) {
+ if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node())
+ && nodeCanSpeculateInt32(node->arithNodeFlags()))
+ changed |= mergePrediction(SpecInt32);
+ else
+ changed |= mergePrediction(SpecDouble);
+ }
+ break;
+ }
+
case ArithMod: {
SpeculatedType left = node->child1()->prediction();
SpeculatedType right = node->child2()->prediction();
if (left && right) {
- if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node->child1().node(), node->child2().node())
- && node->canSpeculateInt32(m_pass))
+ if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node())
+ && nodeCanSpeculateInt32(node->arithNodeFlags()))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecBytecodeDouble);
+ changed |= mergePrediction(SpecDouble);
}
break;
}
-
- case ArithPow:
+
case ArithSqrt:
- case ArithFRound:
case ArithSin:
- case ArithCos:
- case ArithLog: {
- changed |= setPrediction(SpecBytecodeDouble);
- break;
- }
-
- case ArithRound: {
- if (isInt32OrBooleanSpeculation(node->getHeapPrediction()) && m_graph.roundShouldSpeculateInt32(node, m_pass))
- changed |= setPrediction(SpecInt32);
- else
- changed |= setPrediction(SpecBytecodeDouble);
+ case ArithCos: {
+ changed |= setPrediction(SpecDouble);
break;
}
-
+
case ArithAbs: {
SpeculatedType child = node->child1()->prediction();
- if (isInt32OrBooleanSpeculationForArithmetic(child)
- && node->canSpeculateInt32(m_pass))
+ if (isInt32SpeculationForArithmetic(child)
+ && nodeCanSpeculateInt32(node->arithNodeFlags()))
changed |= mergePrediction(SpecInt32);
else
changed |= mergePrediction(speculatedDoubleTypeForPrediction(child));
@@ -372,72 +344,48 @@ private:
case CompareEq:
case CompareEqConstant:
case CompareStrictEq:
+ case CompareStrictEqConstant:
case InstanceOf:
case IsUndefined:
case IsBoolean:
case IsNumber:
case IsString:
case IsObject:
- case IsObjectOrNull:
case IsFunction: {
changed |= setPrediction(SpecBoolean);
break;
}
case TypeOf: {
- changed |= setPrediction(SpecStringIdent);
+ changed |= setPrediction(SpecString);
break;
}
case GetByVal: {
if (!node->child1()->prediction())
break;
-
- ArrayMode arrayMode = node->arrayMode().refine(
- m_graph, node,
- node->child1()->prediction(),
- node->child2()->prediction(),
- SpecNone);
-
- switch (arrayMode.type()) {
- case Array::Int32:
- if (arrayMode.isOutOfBounds())
- changed |= mergePrediction(node->getHeapPrediction() | SpecInt32);
- else
- changed |= mergePrediction(SpecInt32);
+ if (!node->getHeapPrediction())
break;
- case Array::Double:
- if (arrayMode.isOutOfBounds())
- changed |= mergePrediction(node->getHeapPrediction() | SpecDoubleReal);
- else
- changed |= mergePrediction(SpecDoubleReal);
- break;
- case Array::Float32Array:
- case Array::Float64Array:
- changed |= mergePrediction(SpecFullDouble);
- break;
- case Array::Uint32Array:
- if (isInt32SpeculationForArithmetic(node->getHeapPrediction()))
+
+ if (node->child1()->shouldSpeculateFloat32Array()
+ || node->child1()->shouldSpeculateFloat64Array())
+ changed |= mergePrediction(SpecDouble);
+ else if (node->child1()->shouldSpeculateUint32Array()) {
+ if (isInt32Speculation(node->getHeapPrediction()))
changed |= mergePrediction(SpecInt32);
- else if (enableInt52())
- changed |= mergePrediction(SpecMachineInt);
else
- changed |= mergePrediction(SpecInt32 | SpecInt52AsDouble);
- break;
- case Array::Int8Array:
- case Array::Uint8Array:
- case Array::Int16Array:
- case Array::Uint16Array:
- case Array::Int32Array:
- changed |= mergePrediction(SpecInt32);
- break;
- default:
+ changed |= mergePrediction(SpecInt52);
+ } else
changed |= mergePrediction(node->getHeapPrediction());
- break;
- }
break;
}
+ case GetMyArgumentsLengthSafe: {
+ changed |= setPrediction(SpecInt32);
+ break;
+ }
+
+ case GetClosureRegisters:
case GetButterfly:
case GetIndexedPropertyStorage:
case AllocatePropertyStorage:
@@ -458,11 +406,18 @@ private:
break;
}
+ case GetMyScope:
+ case SkipTopScope:
case SkipScope: {
changed |= setPrediction(SpecObjectOther);
break;
}
+ case GetCallee: {
+ changed |= setPrediction(SpecFunction);
+ break;
+ }
+
case CreateThis:
case NewObject: {
changed |= setPrediction(SpecFinalObject);
@@ -493,7 +448,6 @@ private:
break;
}
case StringCharAt:
- case CallStringConstructor:
case ToString:
case MakeRope: {
changed |= setPrediction(SpecString);
@@ -512,66 +466,50 @@ private:
break;
}
- case CreateDirectArguments: {
- changed |= setPrediction(SpecDirectArguments);
+ case CreateArguments: {
+ changed |= setPrediction(SpecArguments);
break;
}
- case CreateScopedArguments: {
- changed |= setPrediction(SpecScopedArguments);
+ case NewFunction: {
+ SpeculatedType child = node->child1()->prediction();
+ if (child & SpecEmpty)
+ changed |= mergePrediction((child & ~SpecEmpty) | SpecFunction);
+ else
+ changed |= mergePrediction(child);
break;
}
- case CreateClonedArguments: {
- changed |= setPrediction(SpecObjectOther);
+ case NewFunctionNoCheck:
+ case NewFunctionExpression: {
+ changed |= setPrediction(SpecFunction);
break;
}
- case FiatInt52: {
- RELEASE_ASSERT(enableInt52());
- changed |= setPrediction(SpecMachineInt);
- break;
- }
-
case PutByValAlias:
case GetArrayLength:
case GetTypedArrayByteOffset:
+ case Int32ToDouble:
case DoubleAsInt32:
case GetLocalUnlinked:
+ case GetMyArgumentsLength:
+ case GetMyArgumentByVal:
+ case PhantomPutStructure:
+ case PhantomArguments:
case CheckArray:
case Arrayify:
case ArrayifyToStructure:
case CheckTierUpInLoop:
case CheckTierUpAtReturn:
case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter:
case InvalidationPoint:
+ case Int52ToValue:
+ case Int52ToDouble:
case CheckInBounds:
- case ValueToInt32:
- case DoubleRep:
- case ValueRep:
- case Int52Rep:
- case Int52Constant:
- case Identity:
- case BooleanToNumber:
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- case GetMyArgumentByVal:
- case ForwardVarargs:
- case PutHint:
- case CheckStructureImmediate:
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- case PutStack:
- case KillStack:
- case StoreBarrier:
- case GetStack: {
+ case ValueToInt32: {
// This node should never be visible at this stage of compilation. It is
// inserted by fixup(), which follows this phase.
- DFG_CRASH(m_graph, node, "Unexpected node during prediction propagation");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
@@ -582,6 +520,7 @@ private:
break;
case Upsilon:
+ case GetArgument:
// These don't get inserted until we go into SSA.
RELEASE_ASSERT_NOT_REACHED();
break;
@@ -594,65 +533,44 @@ private:
changed |= setPrediction(SpecBoolean);
break;
- case GetEnumerableLength: {
- changed |= setPrediction(SpecInt32);
- break;
- }
- case HasGenericProperty:
- case HasStructureProperty:
- case HasIndexedProperty: {
- changed |= setPrediction(SpecBoolean);
- break;
- }
- case GetPropertyEnumerator: {
- changed |= setPrediction(SpecCell);
- break;
- }
- case GetEnumeratorStructurePname: {
- changed |= setPrediction(SpecCell | SpecOther);
- break;
- }
- case GetEnumeratorGenericPname: {
- changed |= setPrediction(SpecCell | SpecOther);
- break;
- }
- case ToIndexString: {
- changed |= setPrediction(SpecString);
+ case Identity:
+ changed |= mergePrediction(node->child1()->prediction());
break;
- }
#ifndef NDEBUG
// These get ignored because they don't return anything.
+ case StoreBarrier:
+ case ConditionalStoreBarrier:
+ case StoreBarrierWithNullCheck:
case PutByValDirect:
case PutByVal:
case PutClosureVar:
- case PutToArguments:
case Return:
case Throw:
case PutById:
- case PutByIdFlush:
case PutByIdDirect:
case PutByOffset:
- case MultiPutByOffset:
case DFG::Jump:
case Branch:
case Switch:
case Breakpoint:
case ProfileWillCall:
case ProfileDidCall:
- case ProfileType:
- case ProfileControlFlow:
case CheckHasInstance:
case ThrowReferenceError:
case ForceOSRExit:
case SetArgument:
case CheckStructure:
- case CheckCell:
- case CheckNotEmpty:
- case CheckIdent:
- case CheckBadCell:
+ case CheckExecutable:
+ case StructureTransitionWatchpoint:
+ case CheckFunction:
case PutStructure:
+ case TearOffActivation:
+ case TearOffArguments:
+ case CheckArgumentsNotCreated:
+ case VariableWatchpoint:
case VarInjectionWatchpoint:
+ case AllocationProfileWatchpoint:
case Phantom:
case Check:
case PutGlobalVar:
@@ -660,14 +578,11 @@ private:
case Unreachable:
case LoopHint:
case NotifyWrite:
+ case FunctionReentryWatchpoint:
+ case TypedArrayWatchpoint:
case ConstantStoragePointer:
case MovHint:
case ZombieHint:
- case LoadVarargs:
- break;
-
- // This gets ignored because it only pretends to produce a value.
- case BottomValue:
break;
// This gets ignored because it already has a prediction.
@@ -720,14 +635,8 @@ private:
}
}
- void doDoubleVoting(Node* node, float weight)
+ void doDoubleVoting(Node* node)
{
- // Loop pre-headers created by OSR entrypoint creation may have NaN weight to indicate
- // that we actually don't know they weight. Assume that they execute once. This turns
- // out to be an OK assumption since the pre-header doesn't have any meaningful code.
- if (weight != weight)
- weight = 1;
-
switch (node->op()) {
case ValueAdd:
case ArithAdd:
@@ -737,16 +646,15 @@ private:
DoubleBallot ballot;
- if (isFullNumberSpeculation(left)
- && isFullNumberSpeculation(right)
- && !m_graph.addShouldSpeculateInt32(node, m_pass)
+ if (isFullNumberSpeculationExpectingDefined(left) && isFullNumberSpeculationExpectingDefined(right)
+ && !m_graph.addShouldSpeculateInt32(node)
&& !m_graph.addShouldSpeculateMachineInt(node))
ballot = VoteDouble;
else
ballot = VoteValue;
- m_graph.voteNode(node->child1(), ballot, weight);
- m_graph.voteNode(node->child2(), ballot, weight);
+ m_graph.voteNode(node->child1(), ballot);
+ m_graph.voteNode(node->child2(), ballot);
break;
}
@@ -756,16 +664,15 @@ private:
DoubleBallot ballot;
- if (isFullNumberSpeculation(left)
- && isFullNumberSpeculation(right)
- && !m_graph.mulShouldSpeculateInt32(node, m_pass)
- && !m_graph.mulShouldSpeculateMachineInt(node, m_pass))
+ if (isFullNumberSpeculation(left) && isFullNumberSpeculation(right)
+ && !m_graph.mulShouldSpeculateInt32(node)
+ && !m_graph.mulShouldSpeculateMachineInt(node))
ballot = VoteDouble;
else
ballot = VoteValue;
- m_graph.voteNode(node->child1(), ballot, weight);
- m_graph.voteNode(node->child2(), ballot, weight);
+ m_graph.voteNode(node->child1(), ballot);
+ m_graph.voteNode(node->child2(), ballot);
break;
}
@@ -778,47 +685,41 @@ private:
DoubleBallot ballot;
- if (isFullNumberSpeculation(left)
- && isFullNumberSpeculation(right)
- && !(Node::shouldSpeculateInt32OrBooleanForArithmetic(node->child1().node(), node->child2().node()) && node->canSpeculateInt32(m_pass)))
+ if (isFullNumberSpeculation(left) && isFullNumberSpeculation(right)
+ && !(Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node()) && node->canSpeculateInt32()))
ballot = VoteDouble;
else
ballot = VoteValue;
- m_graph.voteNode(node->child1(), ballot, weight);
- m_graph.voteNode(node->child2(), ballot, weight);
+ m_graph.voteNode(node->child1(), ballot);
+ m_graph.voteNode(node->child2(), ballot);
break;
}
case ArithAbs:
DoubleBallot ballot;
- if (node->child1()->shouldSpeculateNumber()
- && !(node->child1()->shouldSpeculateInt32OrBooleanForArithmetic() && node->canSpeculateInt32(m_pass)))
+ if (!(node->child1()->shouldSpeculateInt32ForArithmetic() && node->canSpeculateInt32()))
ballot = VoteDouble;
else
ballot = VoteValue;
- m_graph.voteNode(node->child1(), ballot, weight);
+ m_graph.voteNode(node->child1(), ballot);
break;
case ArithSqrt:
case ArithCos:
case ArithSin:
- case ArithLog:
- if (node->child1()->shouldSpeculateNumber())
- m_graph.voteNode(node->child1(), VoteDouble, weight);
- else
- m_graph.voteNode(node->child1(), VoteValue, weight);
+ m_graph.voteNode(node->child1(), VoteDouble);
break;
case SetLocal: {
SpeculatedType prediction = node->child1()->prediction();
if (isDoubleSpeculation(prediction))
- node->variableAccessData()->vote(VoteDouble, weight);
+ node->variableAccessData()->vote(VoteDouble);
else if (
!isFullNumberSpeculation(prediction)
|| isInt32Speculation(prediction) || isMachineIntSpeculation(prediction))
- node->variableAccessData()->vote(VoteValue, weight);
+ node->variableAccessData()->vote(VoteValue);
break;
}
@@ -828,14 +729,14 @@ private:
Edge child1 = m_graph.varArgChild(node, 0);
Edge child2 = m_graph.varArgChild(node, 1);
Edge child3 = m_graph.varArgChild(node, 2);
- m_graph.voteNode(child1, VoteValue, weight);
- m_graph.voteNode(child2, VoteValue, weight);
+ m_graph.voteNode(child1, VoteValue);
+ m_graph.voteNode(child2, VoteValue);
switch (node->arrayMode().type()) {
case Array::Double:
- m_graph.voteNode(child3, VoteDouble, weight);
+ m_graph.voteNode(child3, VoteDouble);
break;
default:
- m_graph.voteNode(child3, VoteValue, weight);
+ m_graph.voteNode(child3, VoteValue);
break;
}
break;
@@ -846,7 +747,7 @@ private:
break;
default:
- m_graph.voteChildren(node, VoteValue, weight);
+ m_graph.voteChildren(node, VoteValue);
break;
}
}
@@ -862,7 +763,7 @@ private:
ASSERT(block->isReachable);
for (unsigned i = 0; i < block->size(); ++i) {
m_currentNode = block->at(i);
- doDoubleVoting(m_currentNode, block->executionCount);
+ doDoubleVoting(m_currentNode);
}
}
for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
@@ -871,7 +772,8 @@ private:
continue;
m_changed |= variableAccessData->tallyVotesForShouldUseDoubleFormat();
}
- propagateThroughArgumentPositions();
+ for (unsigned i = 0; i < m_graph.m_argumentPositions.size(); ++i)
+ m_changed |= m_graph.m_argumentPositions[i].mergeArgumentPredictionAwareness();
for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i];
if (!variableAccessData->isRoot())
@@ -880,15 +782,8 @@ private:
}
}
- void propagateThroughArgumentPositions()
- {
- for (unsigned i = 0; i < m_graph.m_argumentPositions.size(); ++i)
- m_changed |= m_graph.m_argumentPositions[i].mergeArgumentPredictionAwareness();
- }
-
Node* m_currentNode;
bool m_changed;
- PredictionPass m_pass; // We use different logic for considering predictions depending on how far along we are in propagation.
};
bool performPredictionPropagation(Graph& graph)
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h
index 082295f32..29fe8455e 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGPredictionPropagationPhase_h
#define DFGPredictionPropagationPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "SpeculatedType.h"
@@ -46,7 +48,7 @@ class Graph;
bool performPredictionPropagation(Graph&);
// Helper used for FixupPhase for computing the predicted type of a ToPrimitive.
-SpeculatedType resultOfToPrimitive(SpeculatedType);
+SpeculatedType resultOfToPrimitive(SpeculatedType type);
} } // namespace JSC::DFG::Phase
diff --git a/Source/JavaScriptCore/dfg/DFGPromoteHeapAccess.h b/Source/JavaScriptCore/dfg/DFGPromoteHeapAccess.h
deleted file mode 100644
index 2bf2875c1..000000000
--- a/Source/JavaScriptCore/dfg/DFGPromoteHeapAccess.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGPromoteHeapAccess_h
-#define DFGPromoteHeapAccess_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGNode.h"
-#include "DFGPromotedHeapLocation.h"
-
-namespace JSC { namespace DFG {
-
-template<typename WriteFunctor, typename ReadFunctor>
-void promoteHeapAccess(Node* node, const WriteFunctor& write, const ReadFunctor& read)
-{
- switch (node->op()) {
- case CheckStructure: {
- if (node->child1()->isPhantomObjectAllocation())
- read(PromotedHeapLocation(StructurePLoc, node->child1()));
- break;
- }
-
- case GetByOffset:
- case GetGetterSetterByOffset: {
- if (node->child2()->isPhantomObjectAllocation()) {
- unsigned identifierNumber = node->storageAccessData().identifierNumber;
- read(PromotedHeapLocation(NamedPropertyPLoc, node->child2(), identifierNumber));
- }
- break;
- }
-
- case MultiGetByOffset: {
- if (node->child1()->isPhantomObjectAllocation()) {
- unsigned identifierNumber = node->multiGetByOffsetData().identifierNumber;
- read(PromotedHeapLocation(NamedPropertyPLoc, node->child1(), identifierNumber));
- }
- break;
- }
-
- case GetClosureVar:
- if (node->child1()->isPhantomActivationAllocation())
- read(PromotedHeapLocation(ClosureVarPLoc, node->child1(), node->scopeOffset().offset()));
- break;
-
- case SkipScope:
- if (node->child1()->isPhantomActivationAllocation())
- read(PromotedHeapLocation(ActivationScopePLoc, node->child1()));
- break;
-
- case GetScope:
- if (node->child1()->isPhantomFunctionAllocation())
- read(PromotedHeapLocation(FunctionActivationPLoc, node->child1()));
- break;
-
- case GetExecutable:
- if (node->child1()->isPhantomFunctionAllocation())
- read(PromotedHeapLocation(FunctionExecutablePLoc, node->child1()));
- break;
-
- case PutHint: {
- ASSERT(node->child1()->isPhantomAllocation());
- write(
- PromotedHeapLocation(node->child1().node(), node->promotedLocationDescriptor()),
- node->child2());
- break;
- }
-
- default:
- break;
- }
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGPromoteHeapAccess_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp b/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp
deleted file mode 100644
index 24f69770e..000000000
--- a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGPromotedHeapLocation.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-void PromotedLocationDescriptor::dump(PrintStream& out) const
-{
- out.print(m_kind, "(", m_info, ")");
-}
-
-Node* PromotedHeapLocation::createHint(Graph& graph, NodeOrigin origin, Node* value)
-{
- return graph.addNode(
- SpecNone, PutHint, origin, OpInfo(descriptor().imm1()), OpInfo(descriptor().imm2()),
- base()->defaultEdge(), value->defaultEdge());
-}
-
-void PromotedHeapLocation::dump(PrintStream& out) const
-{
- out.print(kind(), "(", m_base, ", ", info(), ")");
-}
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-using namespace JSC::DFG;
-
-void printInternal(PrintStream& out, PromotedLocationKind kind)
-{
- switch (kind) {
- case InvalidPromotedLocationKind:
- out.print("InvalidPromotedLocationKind");
- return;
-
- case StructurePLoc:
- out.print("StructurePLoc");
- return;
-
- case ActivationSymbolTablePLoc:
- out.print("ActivationSymbolTablePLoc");
- return;
-
- case NamedPropertyPLoc:
- out.print("NamedPropertyPLoc");
- return;
-
- case ArgumentPLoc:
- out.print("ArgumentPLoc");
- return;
-
- case ArgumentCountPLoc:
- out.print("ArgumentCountPLoc");
- return;
-
- case ArgumentsCalleePLoc:
- out.print("ArgumentsCalleePLoc");
- return;
-
- case FunctionExecutablePLoc:
- out.print("FunctionExecutablePLoc");
- return;
-
- case FunctionActivationPLoc:
- out.print("FunctionActivationPLoc");
- return;
-
- case ActivationScopePLoc:
- out.print("ActivationScopePLoc");
- return;
-
- case ClosureVarPLoc:
- out.print("ClosureVarPLoc");
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace WTF;
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h b/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h
deleted file mode 100644
index a21e4da1a..000000000
--- a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGPromotedHeapLocation_h
-#define DFGPromotedHeapLocation_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGNode.h"
-#include <wtf/PrintStream.h>
-
-namespace JSC { namespace DFG {
-
-enum PromotedLocationKind {
- InvalidPromotedLocationKind,
-
- StructurePLoc,
- ActivationSymbolTablePLoc,
- NamedPropertyPLoc,
- ArgumentPLoc,
- ArgumentCountPLoc,
- ArgumentsCalleePLoc,
-
- FunctionExecutablePLoc,
- FunctionActivationPLoc,
- ActivationScopePLoc,
- ClosureVarPLoc,
-};
-
-class PromotedLocationDescriptor {
-public:
- PromotedLocationDescriptor(
- PromotedLocationKind kind = InvalidPromotedLocationKind, unsigned info = 0)
- : m_kind(kind)
- , m_info(info)
- {
- }
-
- PromotedLocationDescriptor(WTF::HashTableDeletedValueType)
- : m_kind(InvalidPromotedLocationKind)
- , m_info(1)
- {
- }
-
- bool operator!() const { return m_kind == InvalidPromotedLocationKind; }
-
- explicit operator bool() const { return !!*this; }
-
- PromotedLocationKind kind() const { return m_kind; }
- unsigned info() const { return m_info; }
-
- OpInfo imm1() const { return OpInfo(static_cast<uint32_t>(m_kind)); }
- OpInfo imm2() const { return OpInfo(static_cast<uint32_t>(m_info)); }
-
- unsigned hash() const
- {
- return m_kind + m_info;
- }
-
- bool operator==(const PromotedLocationDescriptor& other) const
- {
- return m_kind == other.m_kind
- && m_info == other.m_info;
- }
-
- bool operator!=(const PromotedLocationDescriptor& other) const
- {
- return !(*this == other);
- }
-
- bool isHashTableDeletedValue() const
- {
- return m_kind == InvalidPromotedLocationKind && m_info;
- }
-
- bool neededForMaterialization() const
- {
- switch (kind()) {
- case NamedPropertyPLoc:
- case ClosureVarPLoc:
- return false;
-
- default:
- return true;
- }
- }
-
- void dump(PrintStream& out) const;
-
-private:
- PromotedLocationKind m_kind;
- unsigned m_info;
-};
-
-struct PromotedLocationDescriptorHash {
- static unsigned hash(const PromotedLocationDescriptor& key) { return key.hash(); }
- static bool equal(const PromotedLocationDescriptor& a, const PromotedLocationDescriptor& b) { return a == b; }
- static const bool safeToCompareToEmptyOrDeleted = true;
-};
-
-class PromotedHeapLocation {
-public:
- PromotedHeapLocation(
- PromotedLocationKind kind = InvalidPromotedLocationKind,
- Node* base = nullptr, unsigned info = 0)
- : m_base(base)
- , m_meta(kind, info)
- {
- }
-
- PromotedHeapLocation(
- PromotedLocationKind kind, Edge base, unsigned info = 0)
- : PromotedHeapLocation(kind, base.node(), info)
- {
- }
-
- PromotedHeapLocation(Node* base, PromotedLocationDescriptor meta)
- : m_base(base)
- , m_meta(meta)
- {
- }
-
- PromotedHeapLocation(WTF::HashTableDeletedValueType)
- : m_base(nullptr)
- , m_meta(InvalidPromotedLocationKind, 1)
- {
- }
-
- Node* createHint(Graph&, NodeOrigin, Node* value);
-
- bool operator!() const { return kind() == InvalidPromotedLocationKind; }
-
- PromotedLocationKind kind() const { return m_meta.kind(); }
- Node* base() const { return m_base; }
- unsigned info() const { return m_meta.info(); }
- PromotedLocationDescriptor descriptor() const { return m_meta; }
-
- unsigned hash() const
- {
- return m_meta.hash() + WTF::PtrHash<Node*>::hash(m_base);
- }
-
- bool operator==(const PromotedHeapLocation& other) const
- {
- return m_base == other.m_base
- && m_meta == other.m_meta;
- }
-
- bool isHashTableDeletedValue() const
- {
- return m_meta.isHashTableDeletedValue();
- }
-
- void dump(PrintStream& out) const;
-
-private:
- Node* m_base;
- PromotedLocationDescriptor m_meta;
-};
-
-struct PromotedHeapLocationHash {
- static unsigned hash(const PromotedHeapLocation& key) { return key.hash(); }
- static bool equal(const PromotedHeapLocation& a, const PromotedHeapLocation& b) { return a == b; }
- static const bool safeToCompareToEmptyOrDeleted = true;
-};
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-void printInternal(PrintStream&, JSC::DFG::PromotedLocationKind);
-
-template<typename T> struct DefaultHash;
-template<> struct DefaultHash<JSC::DFG::PromotedHeapLocation> {
- typedef JSC::DFG::PromotedHeapLocationHash Hash;
-};
-
-template<typename T> struct HashTraits;
-template<> struct HashTraits<JSC::DFG::PromotedHeapLocation> : SimpleClassHashTraits<JSC::DFG::PromotedHeapLocation> {
- static const bool emptyValueIsZero = false;
-};
-
-template<typename T> struct DefaultHash;
-template<> struct DefaultHash<JSC::DFG::PromotedLocationDescriptor> {
- typedef JSC::DFG::PromotedLocationDescriptorHash Hash;
-};
-
-template<typename T> struct HashTraits;
-template<> struct HashTraits<JSC::DFG::PromotedLocationDescriptor> : SimpleClassHashTraits<JSC::DFG::PromotedLocationDescriptor> {
- static const bool emptyValueIsZero = false;
-};
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGPromotedHeapLocation_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGPureValue.cpp b/Source/JavaScriptCore/dfg/DFGPureValue.cpp
deleted file mode 100644
index 4c9f60c06..000000000
--- a/Source/JavaScriptCore/dfg/DFGPureValue.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGPureValue.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-
-namespace JSC { namespace DFG {
-
-void PureValue::dump(PrintStream& out) const
-{
- out.print(Graph::opName(op()));
- out.print("(");
- CommaPrinter comma;
- for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
- if (children().child(i))
- out.print(comma, children().child(i));
- }
- if (m_info)
- out.print(comma, m_info);
- out.print(")");
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGPureValue.h b/Source/JavaScriptCore/dfg/DFGPureValue.h
deleted file mode 100644
index e7d6a3db4..000000000
--- a/Source/JavaScriptCore/dfg/DFGPureValue.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGPureValue_h
-#define DFGPureValue_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGNode.h"
-
-namespace JSC { namespace DFG {
-
-class PureValue {
-public:
- PureValue()
- : m_op(LastNodeType)
- , m_info(0)
- {
- }
-
- PureValue(NodeType op, const AdjacencyList& children, uintptr_t info)
- : m_op(op)
- , m_children(children.sanitized())
- , m_info(info)
- {
- ASSERT(!(defaultFlags(op) & NodeHasVarArgs));
- }
-
- PureValue(NodeType op, const AdjacencyList& children, const void* ptr)
- : PureValue(op, children, bitwise_cast<uintptr_t>(ptr))
- {
- }
-
- PureValue(NodeType op, const AdjacencyList& children)
- : PureValue(op, children, static_cast<uintptr_t>(0))
- {
- }
-
- PureValue(Node* node, uintptr_t info)
- : PureValue(node->op(), node->children, info)
- {
- }
-
- PureValue(Node* node, const void* ptr)
- : PureValue(node->op(), node->children, ptr)
- {
- }
-
- PureValue(Node* node)
- : PureValue(node->op(), node->children)
- {
- }
-
- PureValue(WTF::HashTableDeletedValueType)
- : m_op(LastNodeType)
- , m_info(1)
- {
- }
-
- bool operator!() const { return m_op == LastNodeType && !m_info; }
-
- NodeType op() const { return m_op; }
- const AdjacencyList& children() const { return m_children; }
- uintptr_t info() const { return m_info; }
-
- unsigned hash() const
- {
- return WTF::IntHash<int>::hash(static_cast<int>(m_op)) + m_children.hash() + m_info;
- }
-
- bool operator==(const PureValue& other) const
- {
- return m_op == other.m_op
- && m_children == other.m_children
- && m_info == other.m_info;
- }
-
- bool isHashTableDeletedValue() const
- {
- return m_op == LastNodeType && m_info;
- }
-
- void dump(PrintStream& out) const;
-
-private:
- NodeType m_op;
- AdjacencyList m_children;
- uintptr_t m_info;
-};
-
-struct PureValueHash {
- static unsigned hash(const PureValue& key) { return key.hash(); }
- static bool equal(const PureValue& a, const PureValue& b) { return a == b; }
- static const bool safeToCompareToEmptyOrDeleted = true;
-};
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-template<typename T> struct DefaultHash;
-template<> struct DefaultHash<JSC::DFG::PureValue> {
- typedef JSC::DFG::PureValueHash Hash;
-};
-
-template<typename T> struct HashTraits;
-template<> struct HashTraits<JSC::DFG::PureValue> : SimpleClassHashTraits<JSC::DFG::PureValue> {
- static const bool emptyValueIsZero = false;
-};
-
-} // namespace WTF
-
-namespace JSC { namespace DFG {
-
-typedef HashMap<PureValue, Node*> PureMap;
-typedef HashMap<PureValue, Vector<Node*>> PureMultiMap;
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGPureValue_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp b/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp
deleted file mode 100644
index ca58ed3fd..000000000
--- a/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGPutStackSinkingPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBlockMapInlines.h"
-#include "DFGGraph.h"
-#include "DFGInsertionSet.h"
-#include "DFGPhase.h"
-#include "DFGPreciseLocalClobberize.h"
-#include "DFGSSACalculator.h"
-#include "DFGValidate.h"
-#include "JSCInlines.h"
-#include "OperandsInlines.h"
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-bool verbose = false;
-
-class PutStackSinkingPhase : public Phase {
-public:
- PutStackSinkingPhase(Graph& graph)
- : Phase(graph, "PutStack sinking")
- {
- }
-
- bool run()
- {
- // FIXME: One of the problems of this approach is that it will create a duplicate Phi graph
- // for sunken PutStacks in the presence of interesting control flow merges, and where the
- // value being PutStack'd is also otherwise live in the DFG code. We could work around this
- // by doing the sinking over CPS, or maybe just by doing really smart hoisting. It's also
- // possible that the duplicate Phi graph can be deduplicated by LLVM. It would be best if we
- // could observe that there is already a Phi graph in place that does what we want. In
- // principle if we have a request to place a Phi at a particular place, we could just check
- // if there is already a Phi that does what we want. Because PutStackSinkingPhase runs just
- // after SSA conversion, we have almost a guarantee that the Phi graph we produce here would
- // be trivially redundant to the one we already have.
-
- // FIXME: This phase doesn't adequately use KillStacks. KillStack can be viewed as a def.
- // This is mostly inconsequential; it would be a bug to have a local live at a KillStack.
- // More important is that KillStack should swallow any deferral. After a KillStack, the
- // local should behave like a TOP deferral because it would be invalid for anyone to trust
- // the stack. It's not clear to me if this is important or not.
- // https://bugs.webkit.org/show_bug.cgi?id=145296
-
- if (verbose) {
- dataLog("Graph before PutStack sinking:\n");
- m_graph.dump();
- }
-
- SSACalculator ssaCalculator(m_graph);
- InsertionSet insertionSet(m_graph);
-
- // First figure out where various locals are live.
- BlockMap<Operands<bool>> liveAtHead(m_graph);
- BlockMap<Operands<bool>> liveAtTail(m_graph);
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- liveAtHead[block] = Operands<bool>(OperandsLike, block->variablesAtHead);
- liveAtTail[block] = Operands<bool>(OperandsLike, block->variablesAtHead);
-
- liveAtHead[block].fill(false);
- liveAtTail[block].fill(false);
- }
-
- bool changed;
- do {
- changed = false;
-
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
-
- Operands<bool> live = liveAtTail[block];
- for (unsigned nodeIndex = block->size(); nodeIndex--;) {
- Node* node = block->at(nodeIndex);
- if (verbose)
- dataLog("Live at ", node, ": ", live, "\n");
-
- auto escapeHandler = [&] (VirtualRegister operand) {
- if (operand.isHeader())
- return;
- if (verbose)
- dataLog(" ", operand, " is live at ", node, "\n");
- live.operand(operand) = true;
- };
-
- // FIXME: This might mishandle LoadVarargs and ForwardVarargs. It might make us
- // think that the locals being written are stack-live here. They aren't. This
- // should be harmless since we overwrite them anyway, but still, it's sloppy.
- // https://bugs.webkit.org/show_bug.cgi?id=145295
- preciseLocalClobberize(
- m_graph, node, escapeHandler, escapeHandler,
- [&] (VirtualRegister operand, LazyNode source) {
- RELEASE_ASSERT(source.isNode());
-
- if (source.asNode() == node) {
- // This is a load. Ignore it.
- return;
- }
-
- RELEASE_ASSERT(node->op() == PutStack);
- live.operand(operand) = false;
- });
- }
-
- if (live == liveAtHead[block])
- continue;
-
- liveAtHead[block] = live;
- changed = true;
-
- for (BasicBlock* predecessor : block->predecessors) {
- for (size_t i = live.size(); i--;)
- liveAtTail[predecessor][i] |= live[i];
- }
- }
-
- } while (changed);
-
- // All of the arguments should be live at head of root. Note that we may find that some
- // locals are live at head of root. This seems wrong but isn't. This will happen for example
- // if the function accesses closure variable #42 for some other function and we either don't
- // have variable #42 at all or we haven't set it at root, for whatever reason. Basically this
- // arises since our aliasing for closure variables is conservatively based on variable number
- // and ignores the owning symbol table. We should probably fix this eventually and make our
- // aliasing more precise.
- //
- // For our purposes here, the imprecision in the aliasing is harmless. It just means that we
- // may not do as much Phi pruning as we wanted.
- for (size_t i = liveAtHead.atIndex(0).numberOfArguments(); i--;)
- DFG_ASSERT(m_graph, nullptr, liveAtHead.atIndex(0).argument(i));
-
- // Next identify where we would want to sink PutStacks to. We say that there is a deferred
- // flush if we had a PutStack with a given FlushFormat but it hasn't been materialized yet.
- // Deferrals have the following lattice; but it's worth noting that the TOP part of the
- // lattice serves an entirely different purpose than the rest of the lattice: it just means
- // that we're in a region of code where nobody should have been relying on the value. The
- // rest of the lattice means that we either have a PutStack that is deferred (i.e. still
- // needs to be executed) or there isn't one (because we've alraedy executed it).
- //
- // Bottom:
- // Represented as DeadFlush.
- // Means that all previous PutStacks have been executed so there is nothing deferred.
- // During merging this is subordinate to the other kinds of deferrals, because it
- // represents the fact that we've already executed all necessary PutStacks. This implies
- // that there *had* been some PutStacks that we should have executed.
- //
- // Top:
- // Represented as ConflictingFlush.
- // Represents the fact that we know, via forward flow, that there isn't any value in the
- // given local that anyone should have been relying on. This comes into play at the
- // prologue (because in SSA form at the prologue no local has any value) or when we merge
- // deferrals for different formats's. A lexical scope in which a local had some semantic
- // meaning will by this point share the same format; if we had stores from different
- // lexical scopes that got merged together then we may have a conflicting format. Hence
- // a conflicting format proves that we're no longer in an area in which the variable was
- // in scope. Note that this is all approximate and only precise enough to later answer
- // questions pertinent to sinking. For example, this doesn't always detect when a local
- // is no longer semantically relevant - we may well have a deferral from inside some
- // inlined call survive outside of that inlined code, and this is generally OK. In the
- // worst case it means that we might think that a deferral that is actually dead must
- // still be executed. But we usually catch that with liveness. Liveness usually catches
- // such cases, but that's not guaranteed since liveness is conservative.
- //
- // What Top does give us is detects situations where we both don't need to care about a
- // deferral and there is no way that we could reason about it anyway. If we merged
- // deferrals for different formats then we wouldn't know the format to use. So, we use
- // Top in that case because that's also a case where we know that we can ignore the
- // deferral.
- //
- // Deferral with a concrete format:
- // Represented by format values other than DeadFlush or ConflictingFlush.
- // Represents the fact that the original code would have done a PutStack but we haven't
- // identified an operation that would have observed that PutStack.
- //
- // This code has some interesting quirks because of the fact that neither liveness nor
- // deferrals are very precise. They are only precise enough to be able to correctly tell us
- // when we may [sic] need to execute PutStacks. This means that they may report the need to
- // execute a PutStack in cases where we actually don't really need it, and that's totally OK.
- BlockMap<Operands<FlushFormat>> deferredAtHead(m_graph);
- BlockMap<Operands<FlushFormat>> deferredAtTail(m_graph);
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- deferredAtHead[block] =
- Operands<FlushFormat>(OperandsLike, block->variablesAtHead);
- deferredAtTail[block] =
- Operands<FlushFormat>(OperandsLike, block->variablesAtHead);
- }
-
- for (unsigned local = deferredAtHead.atIndex(0).numberOfLocals(); local--;)
- deferredAtHead.atIndex(0).local(local) = ConflictingFlush;
-
- do {
- changed = false;
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- Operands<FlushFormat> deferred = deferredAtHead[block];
-
- for (Node* node : *block) {
- if (verbose)
- dataLog("Deferred at ", node, ":", deferred, "\n");
-
- if (node->op() == GetStack) {
- DFG_ASSERT(
- m_graph, node,
- deferred.operand(node->stackAccessData()->local) != ConflictingFlush);
-
- // A GetStack doesn't affect anything, since we know which local we are reading
- // from.
- continue;
- }
-
- auto escapeHandler = [&] (VirtualRegister operand) {
- if (verbose)
- dataLog("For ", node, " escaping ", operand, "\n");
- if (operand.isHeader())
- return;
- // We will materialize just before any reads.
- deferred.operand(operand) = DeadFlush;
- };
-
- preciseLocalClobberize(
- m_graph, node, escapeHandler, escapeHandler,
- [&] (VirtualRegister operand, LazyNode source) {
- RELEASE_ASSERT(source.isNode());
-
- if (source.asNode() == node) {
- // This is a load. Ignore it.
- return;
- }
-
- deferred.operand(operand) = node->stackAccessData()->format;
- });
- }
-
- if (deferred == deferredAtTail[block])
- continue;
-
- deferredAtTail[block] = deferred;
- changed = true;
-
- for (BasicBlock* successor : block->successors()) {
- for (size_t i = deferred.size(); i--;) {
- if (verbose)
- dataLog("Considering ", VirtualRegister(deferred.operandForIndex(i)), " at ", pointerDump(block), "->", pointerDump(successor), ": ", deferred[i], " and ", deferredAtHead[successor][i], " merges to ");
-
- deferredAtHead[successor][i] =
- merge(deferredAtHead[successor][i], deferred[i]);
-
- if (verbose)
- dataLog(deferredAtHead[successor][i], "\n");
- }
- }
- }
-
- } while (changed);
-
- // We wish to insert PutStacks at all of the materialization points, which are defined
- // implicitly as the places where we set deferred to Dead while it was previously not Dead.
- // To do this, we may need to build some Phi functions to handle stuff like this:
- //
- // Before:
- //
- // if (p)
- // PutStack(r42, @x)
- // else
- // PutStack(r42, @y)
- //
- // After:
- //
- // if (p)
- // Upsilon(@x, ^z)
- // else
- // Upsilon(@y, ^z)
- // z: Phi()
- // PutStack(r42, @z)
- //
- // This means that we have an SSACalculator::Variable for each local, and a Def is any
- // PutStack in the original program. The original PutStacks will simply vanish.
-
- Operands<SSACalculator::Variable*> operandToVariable(
- OperandsLike, m_graph.block(0)->variablesAtHead);
- Vector<VirtualRegister> indexToOperand;
- for (size_t i = m_graph.block(0)->variablesAtHead.size(); i--;) {
- VirtualRegister operand(m_graph.block(0)->variablesAtHead.operandForIndex(i));
-
- SSACalculator::Variable* variable = ssaCalculator.newVariable();
- operandToVariable.operand(operand) = variable;
- ASSERT(indexToOperand.size() == variable->index());
- indexToOperand.append(operand);
- }
-
- HashSet<Node*> putLocalsToSink;
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- for (Node* node : *block) {
- switch (node->op()) {
- case PutStack:
- putLocalsToSink.add(node);
- ssaCalculator.newDef(
- operandToVariable.operand(node->stackAccessData()->local),
- block, node->child1().node());
- break;
- case GetStack:
- ssaCalculator.newDef(
- operandToVariable.operand(node->stackAccessData()->local),
- block, node);
- break;
- default:
- break;
- }
- }
- }
-
- ssaCalculator.computePhis(
- [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
- VirtualRegister operand = indexToOperand[variable->index()];
-
- if (!liveAtHead[block].operand(operand))
- return nullptr;
-
- FlushFormat format = deferredAtHead[block].operand(operand);
-
- // We could have an invalid deferral because liveness is imprecise.
- if (!isConcrete(format))
- return nullptr;
-
- if (verbose)
- dataLog("Adding Phi for ", operand, " at ", pointerDump(block), "\n");
-
- Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
- phiNode->mergeFlags(resultFor(format));
- return phiNode;
- });
-
- Operands<Node*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead);
- Operands<FlushFormat> deferred;
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- mapping.fill(nullptr);
-
- for (size_t i = mapping.size(); i--;) {
- VirtualRegister operand(mapping.operandForIndex(i));
-
- SSACalculator::Variable* variable = operandToVariable.operand(operand);
- SSACalculator::Def* def = ssaCalculator.reachingDefAtHead(block, variable);
- if (!def)
- continue;
-
- mapping.operand(operand) = def->value();
- }
-
- if (verbose)
- dataLog("Mapping at top of ", pointerDump(block), ": ", mapping, "\n");
-
- for (SSACalculator::Def* phiDef : ssaCalculator.phisForBlock(block)) {
- VirtualRegister operand = indexToOperand[phiDef->variable()->index()];
-
- insertionSet.insert(0, phiDef->value());
-
- if (verbose)
- dataLog(" Mapping ", operand, " to ", phiDef->value(), "\n");
- mapping.operand(operand) = phiDef->value();
- }
-
- deferred = deferredAtHead[block];
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- if (verbose)
- dataLog("Deferred at ", node, ":", deferred, "\n");
-
- switch (node->op()) {
- case PutStack: {
- StackAccessData* data = node->stackAccessData();
- VirtualRegister operand = data->local;
- deferred.operand(operand) = data->format;
- if (verbose)
- dataLog(" Mapping ", operand, " to ", node->child1().node(), " at ", node, "\n");
- mapping.operand(operand) = node->child1().node();
- break;
- }
-
- case GetStack: {
- StackAccessData* data = node->stackAccessData();
- FlushFormat format = deferred.operand(data->local);
- if (!isConcrete(format)) {
- DFG_ASSERT(
- m_graph, node,
- deferred.operand(data->local) != ConflictingFlush);
-
- // This means there is no deferral. No deferral means that the most
- // authoritative value for this stack slot is what is stored in the stack. So,
- // keep the GetStack.
- mapping.operand(data->local) = node;
- break;
- }
-
- // We have a concrete deferral, which means a PutStack that hasn't executed yet. It
- // would have stored a value with a certain format. That format must match our
- // format. But more importantly, we can simply use the value that the PutStack would
- // have stored and get rid of the GetStack.
- DFG_ASSERT(m_graph, node, format == data->format);
-
- Node* incoming = mapping.operand(data->local);
- node->child1() = incoming->defaultEdge();
- node->convertToIdentity();
- break;
- }
-
- default: {
- auto escapeHandler = [&] (VirtualRegister operand) {
- if (verbose)
- dataLog("For ", node, " escaping ", operand, "\n");
-
- if (operand.isHeader())
- return;
-
- FlushFormat format = deferred.operand(operand);
- if (!isConcrete(format)) {
- // It's dead now, rather than conflicting.
- deferred.operand(operand) = DeadFlush;
- return;
- }
-
- // Gotta insert a PutStack.
- if (verbose)
- dataLog("Inserting a PutStack for ", operand, " at ", node, "\n");
-
- Node* incoming = mapping.operand(operand);
- DFG_ASSERT(m_graph, node, incoming);
-
- insertionSet.insertNode(
- nodeIndex, SpecNone, PutStack, node->origin,
- OpInfo(m_graph.m_stackAccessData.add(operand, format)),
- Edge(incoming, useKindFor(format)));
-
- deferred.operand(operand) = DeadFlush;
- };
-
- preciseLocalClobberize(
- m_graph, node, escapeHandler, escapeHandler,
- [&] (VirtualRegister, LazyNode) { });
- break;
- } }
- }
-
- NodeAndIndex terminal = block->findTerminal();
- size_t upsilonInsertionPoint = terminal.index;
- NodeOrigin upsilonOrigin = terminal.node->origin;
- for (BasicBlock* successorBlock : block->successors()) {
- for (SSACalculator::Def* phiDef : ssaCalculator.phisForBlock(successorBlock)) {
- Node* phiNode = phiDef->value();
- SSACalculator::Variable* variable = phiDef->variable();
- VirtualRegister operand = indexToOperand[variable->index()];
- if (verbose)
- dataLog("Creating Upsilon for ", operand, " at ", pointerDump(block), "->", pointerDump(successorBlock), "\n");
- FlushFormat format = deferredAtHead[successorBlock].operand(operand);
- DFG_ASSERT(m_graph, nullptr, isConcrete(format));
- UseKind useKind = useKindFor(format);
-
- // We need to get a value for the stack slot. This phase doesn't really have a
- // good way of determining if a stack location got clobbered. It just knows if
- // there is a deferral. The lack of a deferral might mean that a PutStack or
- // GetStack had never happened, or it might mean that the value was read, or
- // that it was written. It's OK for us to make some bad decisions here, since
- // GCSE will clean it up anyway.
- Node* incoming;
- if (isConcrete(deferred.operand(operand))) {
- incoming = mapping.operand(operand);
- DFG_ASSERT(m_graph, phiNode, incoming);
- } else {
- // Issue a GetStack to get the value. This might introduce some redundancy
- // into the code, but if it's bad enough, GCSE will clean it up.
- incoming = insertionSet.insertNode(
- upsilonInsertionPoint, SpecNone, GetStack, upsilonOrigin,
- OpInfo(m_graph.m_stackAccessData.add(operand, format)));
- incoming->setResult(resultFor(format));
- }
-
- insertionSet.insertNode(
- upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
- OpInfo(phiNode), Edge(incoming, useKind));
- }
- }
-
- insertionSet.execute(block);
- }
-
- // Finally eliminate the sunken PutStacks by turning them into Checks. This keeps whatever
- // type check they were doing. Also prepend KillStacks to them to ensure that we know that
- // the relevant value was *not* stored to the stack.
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
-
- if (!putLocalsToSink.contains(node))
- continue;
-
- insertionSet.insertNode(
- nodeIndex, SpecNone, KillStack, node->origin, OpInfo(node->stackAccessData()->local.offset()));
- node->remove();
- }
-
- insertionSet.execute(block);
- }
-
- if (verbose) {
- dataLog("Graph after PutStack sinking:\n");
- m_graph.dump();
- }
-
- return true;
- }
-};
-
-} // anonymous namespace
-
-bool performPutStackSinking(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG PutStack Sinking Phase");
- return runPhase<PutStackSinkingPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.h b/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.h
deleted file mode 100644
index 24bbb81f0..000000000
--- a/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.h
+++ /dev/null
@@ -1,46 +0,0 @@
- /*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGPutStackSinkingPhase_h
-#define DFGPutStackSinkingPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Sinks PutStacks to the absolute latest point where they can possibly happen, which is usually
-// side-effects that may observe them. This eliminates PutStacks if it sinks them past the point of
-// their deaths.
-
-bool performPutStackSinking(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGPutStackSinkingPhase_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGPhiChildren.cpp b/Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.cpp
index de078d088..4c5f6949c 100644
--- a/Source/JavaScriptCore/dfg/DFGPhiChildren.cpp
+++ b/Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,38 +24,53 @@
*/
#include "config.h"
-#include "DFGPhiChildren.h"
+#include "DFGResurrectionForValidationPhase.h"
#if ENABLE(DFG_JIT)
+#include "DFGBasicBlockInlines.h"
#include "DFGGraph.h"
+#include "DFGInsertionSet.h"
+#include "DFGPhase.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
-PhiChildren::PhiChildren()
-{
-}
-
-PhiChildren::PhiChildren(Graph& graph)
-{
- for (BasicBlock* block : graph.blocksInNaturalOrder()) {
- for (Node* node : *block) {
- if (node->op() != Upsilon)
+class ResurrectionForValidationPhase : public Phase {
+public:
+ ResurrectionForValidationPhase(Graph& graph)
+ : Phase(graph, "resurrection for validation")
+ {
+ }
+
+ bool run()
+ {
+ InsertionSet insertionSet(m_graph);
+
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
continue;
+
+ for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
+ Node* node = block->at(nodeIndex);
+ if (!node->hasResult())
+ continue;
+ insertionSet.insertNode(
+ nodeIndex + 1, SpecNone, Phantom, node->codeOrigin, Edge(node));
+ }
- m_children.add(node->phi(), List()).iterator->value.append(node);
+ insertionSet.execute(block);
}
+
+ return true;
}
-}
-
-PhiChildren::~PhiChildren()
-{
-}
+};
-const PhiChildren::List& PhiChildren::upsilonsOf(Node* node) const
+bool performResurrectionForValidation(Graph& graph)
{
- ASSERT(node->op() == Phi);
- return m_children.find(node)->value;
+ SamplingRegion samplingRegion("DFG Resurrection For Validation Phase");
+ return runPhase<ResurrectionForValidationPhase>(graph);
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.h b/Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.h
index 520b228ad..98378ec52 100644
--- a/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,23 +23,30 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGArgumentsEliminationPhase_h
-#define DFGArgumentsEliminationPhase_h
+#ifndef DFGResurrectionForValidationPhase_h
+#define DFGResurrectionForValidationPhase_h
+
+#include <wtf/Platform.h>
#if ENABLE(DFG_JIT)
+#include "DFGCommon.h"
+
namespace JSC { namespace DFG {
class Graph;
-// Eliminates allocations of the Arguments-class objects when it can prove that the object doesn't escape
-// and none of the arguments are mutated (either via the object or via the stack).
+// Places a Phantom after every value-producing node, thereby disabling DCE from killing it.
+// This is useful for validating our OSR exit machinery by instituting the requirement that
+// any live-in-bytecode variable should be OSR-available. Without this phase, it's impossible
+// to make such an assertion because our DCE is more aggressive than the bytecode liveness
+// analysis.
-bool performArgumentsElimination(Graph&);
+bool performResurrectionForValidation(Graph&);
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
-#endif // DFGArgumentsEliminationPhase_h
+#endif // DFGResurrectionForValidationPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGSSACalculator.cpp b/Source/JavaScriptCore/dfg/DFGSSACalculator.cpp
deleted file mode 100644
index 263cd2a4f..000000000
--- a/Source/JavaScriptCore/dfg/DFGSSACalculator.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGSSACalculator.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBlockMapInlines.h"
-#include <wtf/CommaPrinter.h>
-#include <wtf/ListDump.h>
-
-namespace JSC { namespace DFG {
-
-void SSACalculator::Variable::dump(PrintStream& out) const
-{
- out.print("var", m_index);
-}
-
-void SSACalculator::Variable::dumpVerbose(PrintStream& out) const
-{
- dump(out);
- if (!m_blocksWithDefs.isEmpty()) {
- out.print("(defs: ");
- CommaPrinter comma;
- for (BasicBlock* block : m_blocksWithDefs)
- out.print(comma, *block);
- out.print(")");
- }
-}
-
-void SSACalculator::Def::dump(PrintStream& out) const
-{
- out.print("def(", *m_variable, ", ", *m_block, ", ", m_value, ")");
-}
-
-SSACalculator::SSACalculator(Graph& graph)
- : m_data(graph)
- , m_graph(graph)
-{
-}
-
-SSACalculator::~SSACalculator()
-{
-}
-
-void SSACalculator::reset()
-{
- m_variables.clear();
- m_defs.clear();
- m_phis.clear();
- for (BlockIndex blockIndex = m_data.size(); blockIndex--;) {
- m_data[blockIndex].m_defs.clear();
- m_data[blockIndex].m_phis.clear();
- }
-}
-
-SSACalculator::Variable* SSACalculator::newVariable()
-{
- return &m_variables.alloc(Variable(m_variables.size()));
-}
-
-SSACalculator::Def* SSACalculator::newDef(Variable* variable, BasicBlock* block, Node* value)
-{
- Def* def = m_defs.add(Def(variable, block, value));
- auto result = m_data[block].m_defs.add(variable, def);
- if (result.isNewEntry)
- variable->m_blocksWithDefs.append(block);
- else
- result.iterator->value = def;
- return def;
-}
-
-SSACalculator::Def* SSACalculator::nonLocalReachingDef(BasicBlock* block, Variable* variable)
-{
- return reachingDefAtTail(m_graph.m_dominators.immediateDominatorOf(block), variable);
-}
-
-SSACalculator::Def* SSACalculator::reachingDefAtTail(BasicBlock* block, Variable* variable)
-{
- for (; block; block = m_graph.m_dominators.immediateDominatorOf(block)) {
- if (Def* def = m_data[block].m_defs.get(variable))
- return def;
- }
- return nullptr;
-}
-
-void SSACalculator::dump(PrintStream& out) const
-{
- out.print("<Variables: [");
- CommaPrinter comma;
- for (unsigned i = 0; i < m_variables.size(); ++i) {
- out.print(comma);
- m_variables[i].dumpVerbose(out);
- }
- out.print("], Defs: [");
- comma = CommaPrinter();
- for (Def* def : const_cast<SSACalculator*>(this)->m_defs)
- out.print(comma, *def);
- out.print("], Phis: [");
- comma = CommaPrinter();
- for (Def* def : const_cast<SSACalculator*>(this)->m_phis)
- out.print(comma, *def);
- out.print("], Block data: [");
- comma = CommaPrinter();
- for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
-
- out.print(comma, *block, "=>(");
- out.print("Defs: {");
- CommaPrinter innerComma;
- for (auto entry : m_data[block].m_defs)
- out.print(innerComma, *entry.key, "->", *entry.value);
- out.print("}, Phis: {");
- innerComma = CommaPrinter();
- for (Def* def : m_data[block].m_phis)
- out.print(innerComma, *def);
- out.print("})");
- }
- out.print("]>");
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGSSACalculator.h b/Source/JavaScriptCore/dfg/DFGSSACalculator.h
deleted file mode 100644
index 4f4f86529..000000000
--- a/Source/JavaScriptCore/dfg/DFGSSACalculator.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGSSACalculator_h
-#define DFGSSACalculator_h
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGDominators.h"
-#include "DFGGraph.h"
-
-namespace JSC { namespace DFG {
-
-// SSACalculator provides a reusable tool for using the Cytron, Ferrante, Rosen, Wegman, and
-// Zadeck "Efficiently Computing Static Single Assignment Form and the Control Dependence Graph"
-// (TOPLAS'91) algorithm for computing SSA. SSACalculator doesn't magically do everything for you
-// but it maintains the major data structures and handles most of the non-local reasoning. Here's
-// the workflow of using SSACalculator to execute this algorithm:
-//
-// 0) Create a fresh SSACalculator instance. You will need this instance only for as long as
-// you're not yet done computing SSA.
-//
-// 1) Create an SSACalculator::Variable for every variable that you want to do Phi insertion
-// on. SSACalculator::Variable::index() is a dense indexing of the Variables that you
-// created, so you can easily use a Vector to map the SSACalculator::Variables to your
-// variables.
-//
-// 2) Create a SSACalculator::Def for every assignment to those variables. A Def knows about the
-// variable, the block, and the DFG::Node* that has the value being put into the variable.
-// Note that creating a Def in block B for variable V if block B already has a def for variable
-// V will overwrite the previous Def's DFG::Node* value. This enables you to create Defs by
-// processing basic blocks in forward order. If a block has multiple Defs of a variable, this
-// "just works" because each block will then remember the last Def of each variable.
-//
-// 3) Call SSACalculator::computePhis(). This takes a functor that will create the Phi nodes. The
-// functor returns either the Phi node it created, or nullptr, if it chooses to prune. (As an
-// aside, it's always sound not to prune, and the safest reason for pruning is liveness.) The
-// computePhis() code will record the created Phi nodes as Defs, and it will separately record
-// the list of Phis inserted at each block. It's OK for the functor you pass here to modify the
-// DFG::Graph on the fly, but the easiest way to write this is to just create the Phi nodes by
-// doing Graph::addNode() and return them. It's then best to insert all Phi nodes for a block
-// in bulk as part of the pass you do below, in step (4).
-//
-// 4) Modify the graph to create the SSA data flow. For each block, this should:
-//
-// 4.0) Compute the set of reaching defs (aka available values) for each variable by calling
-// SSACalculator::reachingDefAtHead() for each variable. Record this in a local table that
-// will be incrementally updated as you proceed through the block in forward order in the
-// next steps:
-//
-// FIXME: It might be better to compute reaching defs for all live variables in one go, to
-// avoid doing repeated dom tree traversals.
-// https://bugs.webkit.org/show_bug.cgi?id=136610
-//
-// 4.1) Insert all of the Phi nodes for the block by using SSACalculator::phisForBlock(), and
-// record those Phi nodes as being available values.
-//
-// 4.2) Process the block in forward order. For each load from a variable, replace it with the
-// available SSA value for that variable. For each store, delete it and record the stored
-// value as being available.
-//
-// Note that you have two options of how to replace loads with SSA values. You can replace
-// the load with an Identity node; this will end up working fairly naturally so long as
-// you run GCSE after your phase. Or, you can replace all uses of the load with the SSA
-// value yourself (using the Graph::performSubstitution() idiom), but that requires that
-// your loop over basic blocks proceeds in the appropriate graph order, for example
-// preorder.
-//
-// FIXME: Make it easier to do this, that doesn't involve rerunning GCSE.
-// https://bugs.webkit.org/show_bug.cgi?id=136639
-//
-// 4.3) Insert Upsilons for each Phi in each successor block. Use the available values table to
-// decide the source value for each Phi's variable. Note that you could also use
-// SSACalculator::reachingDefAtTail() instead of the available values table, though your
-// local available values table is likely to be more efficient.
-//
-// The most obvious use of SSACalculator is for the CPS->SSA conversion itself, but it's meant to
-// also be used for SSA update and for things like the promotion of heap fields to local SSA
-// variables.
-
-class SSACalculator {
-public:
- SSACalculator(Graph&);
- ~SSACalculator();
-
- void reset();
-
- class Variable {
- public:
- unsigned index() const { return m_index; }
-
- void dump(PrintStream&) const;
- void dumpVerbose(PrintStream&) const;
-
- private:
- friend class SSACalculator;
-
- Variable()
- : m_index(UINT_MAX)
- {
- }
-
- Variable(unsigned index)
- : m_index(index)
- {
- }
-
- BlockList m_blocksWithDefs;
- unsigned m_index;
- };
-
- class Def {
- public:
- Variable* variable() const { return m_variable; }
- BasicBlock* block() const { return m_block; }
-
- Node* value() const { return m_value; }
-
- void dump(PrintStream&) const;
-
- private:
- friend class SSACalculator;
-
- Def()
- : m_variable(nullptr)
- , m_block(nullptr)
- , m_value(nullptr)
- {
- }
-
- Def(Variable* variable, BasicBlock* block, Node* value)
- : m_variable(variable)
- , m_block(block)
- , m_value(value)
- {
- }
-
- Variable* m_variable;
- BasicBlock* m_block;
- Node* m_value;
- };
-
- Variable* newVariable();
- Def* newDef(Variable*, BasicBlock*, Node*);
-
- Variable* variable(unsigned index) { return &m_variables[index]; }
-
- // The PhiInsertionFunctor takes a Variable and a BasicBlock and either inserts a Phi and
- // returns the Node for that Phi, or it decides that it's not worth it to insert a Phi at that
- // block because of some additional pruning condition (typically liveness) and returns
- // nullptr. If a non-null Node* is returned, a new Def is created, so that
- // nonLocalReachingDef() will find it later. Note that it is generally always sound to not
- // prune any Phis (that is, to always have the functor insert a Phi and never return nullptr).
- template<typename PhiInsertionFunctor>
- void computePhis(const PhiInsertionFunctor& functor)
- {
- DFG_ASSERT(m_graph, nullptr, m_graph.m_dominators.isValid());
-
- for (Variable& variable : m_variables) {
- m_graph.m_dominators.forAllBlocksInPrunedIteratedDominanceFrontierOf(
- variable.m_blocksWithDefs,
- [&] (BasicBlock* block) -> bool {
- Node* phiNode = functor(&variable, block);
- if (!phiNode)
- return false;
-
- BlockData& data = m_data[block];
- Def* phiDef = m_phis.add(Def(&variable, block, phiNode));
- data.m_phis.append(phiDef);
-
- // Note that it's possible to have a block that looks like this before SSA
- // conversion:
- //
- // label:
- // print(x);
- // ...
- // x = 42;
- // goto label;
- //
- // And it may look like this after SSA conversion:
- //
- // label:
- // x1: Phi()
- // ...
- // Upsilon(42, ^x1)
- // goto label;
- //
- // In this case, we will want to insert a Phi in this block, and the block
- // will already have a Def for the variable. When this happens, we don't want
- // the Phi to override the original Def, since the Phi is at the top, the
- // original Def in the m_defs table would have been at the bottom, and we want
- // m_defs to tell us about defs at tail.
- //
- // So, we rely on the fact that HashMap::add() does nothing if the key was
- // already present.
- data.m_defs.add(&variable, phiDef);
- return true;
- });
- }
- }
-
- const Vector<Def*>& phisForBlock(BasicBlock* block)
- {
- return m_data[block].m_phis;
- }
-
- // Ignores defs within the given block; it assumes that you've taken care of those
- // yourself.
- Def* nonLocalReachingDef(BasicBlock*, Variable*);
- Def* reachingDefAtHead(BasicBlock* block, Variable* variable)
- {
- return nonLocalReachingDef(block, variable);
- }
-
- // Considers the def within the given block, but only works at the tail of the block.
- Def* reachingDefAtTail(BasicBlock*, Variable*);
-
- void dump(PrintStream&) const;
-
-private:
- SegmentedVector<Variable> m_variables;
- Bag<Def> m_defs;
-
- Bag<Def> m_phis;
-
- struct BlockData {
- HashMap<Variable*, Def*> m_defs;
- Vector<Def*> m_phis;
- };
-
- BlockMap<BlockData> m_data;
-
- Graph& m_graph;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGSSACalculator_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp b/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp
index 6993bfcac..57fc09529 100644
--- a/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,9 +32,7 @@
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
-#include "DFGSSACalculator.h"
-#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -44,8 +42,8 @@ class SSAConversionPhase : public Phase {
public:
SSAConversionPhase(Graph& graph)
: Phase(graph, "SSA conversion")
- , m_calculator(graph)
, m_insertionSet(graph)
+ , m_changed(false)
{
}
@@ -53,310 +51,315 @@ public:
{
RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);
- m_graph.clearReplacements();
- m_graph.m_dominators.computeIfNecessary(m_graph);
-
- if (verbose) {
- dataLog("Graph before SSA transformation:\n");
- m_graph.dump();
- }
-
- // Create a SSACalculator::Variable for every root VariableAccessData.
- for (VariableAccessData& variable : m_graph.m_variableAccessData) {
- if (!variable.isRoot())
+ // Figure out which SetLocal's need flushing. Need to do this while the
+ // Phi graph is still intact.
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
continue;
-
- SSACalculator::Variable* ssaVariable = m_calculator.newVariable();
- ASSERT(ssaVariable->index() == m_variableForSSAIndex.size());
- m_variableForSSAIndex.append(&variable);
- m_ssaVariableForVariable.add(&variable, ssaVariable);
+ for (unsigned nodeIndex = block->size(); nodeIndex--;) {
+ Node* node = block->at(nodeIndex);
+ if (node->op() != Flush)
+ continue;
+ addFlushedLocalOp(node);
+ }
+ }
+ while (!m_flushedLocalOpWorklist.isEmpty()) {
+ Node* node = m_flushedLocalOpWorklist.takeLast();
+ ASSERT(m_flushedLocalOps.contains(node));
+ DFG_NODE_DO_TO_CHILDREN(m_graph, node, addFlushedLocalEdge);
}
- // Find all SetLocals and create Defs for them. We handle SetArgument by creating a
- // GetLocal, and recording the flush format.
+ // Eliminate all duplicate or self-pointing Phi edges. This means that
+ // we transform:
+ //
+ // p: Phi(@n1, @n2, @n3)
+ //
+ // into:
+ //
+ // p: Phi(@x)
+ //
+ // if each @ni in {@n1, @n2, @n3} is either equal to @p to is equal
+ // to @x, for exactly one other @x. Additionally, trivial Phis (i.e.
+ // p: Phi(@x)) are forwarded, so that if have an edge to such @p, we
+ // replace it with @x. This loop does this for Phis only; later we do
+ // such forwarding for Phi references found in other nodes.
+ //
+ // See Aycock and Horspool in CC'00 for a better description of what
+ // we're doing here.
+ do {
+ m_changed = false;
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned phiIndex = block->phis.size(); phiIndex--;) {
+ Node* phi = block->phis[phiIndex];
+ if (phi->variableAccessData()->isCaptured())
+ continue;
+ forwardPhiChildren(phi);
+ deduplicateChildren(phi);
+ }
+ }
+ } while (m_changed);
+
+ // For each basic block, for each local live at the head of that block,
+ // figure out what node we should be referring to instead of that local.
+ // If it turns out to be a non-trivial Phi, make sure that we create an
+ // SSA Phi and Upsilons in predecessor blocks. We reuse
+ // BasicBlock::variablesAtHead for tracking which nodes to refer to.
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
- // Must process the block in forward direction because we want to see the last
- // assignment for every local.
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- if (node->op() != SetLocal && node->op() != SetArgument)
+ for (unsigned i = block->variablesAtHead.size(); i--;) {
+ Node* node = block->variablesAtHead[i];
+ if (!node)
continue;
VariableAccessData* variable = node->variableAccessData();
+ if (variable->isCaptured()) {
+ // Poison this entry in variablesAtHead because we don't
+ // want anyone to try to refer to it, if the variable is
+ // captured.
+ block->variablesAtHead[i] = 0;
+ continue;
+ }
+
+ switch (node->op()) {
+ case Phi:
+ case SetArgument:
+ break;
+ case Flush:
+ case GetLocal:
+ case PhantomLocal:
+ node = node->child1().node();
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ RELEASE_ASSERT(node->op() == Phi || node->op() == SetArgument);
- Node* childNode;
- if (node->op() == SetLocal)
- childNode = node->child1().node();
- else {
- ASSERT(node->op() == SetArgument);
- childNode = m_insertionSet.insertNode(
- nodeIndex, node->variableAccessData()->prediction(),
- GetStack, node->origin,
- OpInfo(m_graph.m_stackAccessData.add(variable->local(), variable->flushFormat())));
- if (!ASSERT_DISABLED)
- m_argumentGetters.add(childNode);
- m_argumentMapping.add(node, childNode);
+ bool isFlushed = m_flushedLocalOps.contains(node);
+
+ if (node->op() == Phi) {
+ Edge edge = node->children.justOneChild();
+ if (edge)
+ node = edge.node(); // It's something from a different basic block.
+ else {
+ // It's a non-trivial Phi.
+ FlushFormat format = variable->flushFormat();
+ NodeFlags result = resultFor(format);
+ UseKind useKind = useKindFor(format);
+
+ node = m_insertionSet.insertNode(0, SpecNone, Phi, CodeOrigin());
+ node->mergeFlags(result);
+ RELEASE_ASSERT((node->flags() & NodeResultMask) == result);
+
+ for (unsigned j = block->predecessors.size(); j--;) {
+ BasicBlock* predecessor = block->predecessors[j];
+ predecessor->appendNonTerminal(
+ m_graph, SpecNone, Upsilon, predecessor->last()->codeOrigin,
+ OpInfo(node), Edge(predecessor->variablesAtTail[i], useKind));
+ }
+
+ if (isFlushed) {
+ // Do nothing. For multiple reasons.
+
+ // Reason #1: If the local is flushed then we don't need to bother
+ // with a MovHint since every path to this point in the code will
+ // have flushed the bytecode variable using a SetLocal and hence
+ // the Availability::flushedAt() will agree, and that will be
+ // sufficient for figuring out how to recover the variable's value.
+
+ // Reason #2: If we had inserted a MovHint and the Phi function had
+ // died (because the only user of the value was the "flush" - i.e.
+ // some asynchronous runtime thingy) then the MovHint would turn
+ // into a ZombieHint, which would fool us into thinking that the
+ // variable is dead.
+
+ // Reason #3: If we had inserted a MovHint then even if the Phi
+ // stayed alive, we would still end up generating inefficient code
+ // since we would be telling the OSR exit compiler to use some SSA
+ // value for the bytecode variable rather than just telling it that
+ // the value was already on the stack.
+ } else {
+ m_insertionSet.insertNode(
+ 0, SpecNone, MovHint, CodeOrigin(),
+ OpInfo(variable->local().offset()), Edge(node));
+ }
+ }
}
- m_calculator.newDef(
- m_ssaVariableForVariable.get(variable), block, childNode);
+ block->variablesAtHead[i] = node;
}
-
+
m_insertionSet.execute(block);
}
- // Decide where Phis are to be inserted. This creates the Phi's but doesn't insert them
- // yet. We will later know where to insert them because SSACalculator is such a bro.
- m_calculator.computePhis(
- [&] (SSACalculator::Variable* ssaVariable, BasicBlock* block) -> Node* {
- VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()];
-
- // Prune by liveness. This doesn't buy us much other than compile times.
- Node* headNode = block->variablesAtHead.operand(variable->local());
- if (!headNode)
- return nullptr;
-
- // There is the possibiltiy of "rebirths". The SSA calculator will already prune
- // rebirths for the same VariableAccessData. But it will not be able to prune
- // rebirths that arose from the same local variable number but a different
- // VariableAccessData. We do that pruning here.
- //
- // Here's an example of a rebirth that this would catch:
- //
- // var x;
- // if (foo) {
- // if (bar) {
- // x = 42;
- // } else {
- // x = 43;
- // }
- // print(x);
- // x = 44;
- // } else {
- // x = 45;
- // }
- // print(x); // Without this check, we'd have a Phi for x = 42|43 here.
- //
- // FIXME: Consider feeding local variable numbers, not VariableAccessData*'s, as
- // the "variables" for SSACalculator. That would allow us to eliminate this
- // special case.
- // https://bugs.webkit.org/show_bug.cgi?id=136641
- if (headNode->variableAccessData() != variable)
- return nullptr;
-
- Node* phiNode = m_graph.addNode(
- variable->prediction(), Phi, NodeOrigin());
- FlushFormat format = variable->flushFormat();
- NodeFlags result = resultFor(format);
- phiNode->mergeFlags(result);
- return phiNode;
- });
-
if (verbose) {
- dataLog("Computed Phis, about to transform the graph.\n");
- dataLog("\n");
- dataLog("Graph:\n");
- m_graph.dump();
- dataLog("\n");
- dataLog("Mappings:\n");
- for (unsigned i = 0; i < m_variableForSSAIndex.size(); ++i)
- dataLog(" ", i, ": ", VariableAccessDataDump(m_graph, m_variableForSSAIndex[i]), "\n");
- dataLog("\n");
- dataLog("SSA calculator: ", m_calculator, "\n");
+ dataLog("Variables at head after SSA Phi insertion:\n");
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ dataLog(" ", *block, ": ", block->variablesAtHead, "\n");
+ }
}
- // Do the bulk of the SSA conversion. For each block, this tracks the operand->Node
- // mapping based on a combination of what the SSACalculator tells us, and us walking over
- // the block in forward order. We use our own data structure, valueForOperand, for
- // determining the local mapping, but we rely on SSACalculator for the non-local mapping.
- //
- // This does three things at once:
- //
- // - Inserts the Phis in all of the places where they need to go. We've already created
- // them and they are accounted for in the SSACalculator's data structures, but we
- // haven't inserted them yet, mostly because we want to insert all of a block's Phis in
- // one go to amortize the cost of node insertion.
- //
- // - Create and insert Upsilons.
+ // At this point variablesAtHead in each block refers to either:
//
- // - Convert all of the preexisting SSA nodes (other than the old CPS Phi nodes) into SSA
- // form by replacing as follows:
+ // 1) A new SSA phi in the current block.
+ // 2) A SetArgument, which will soon get converted into a GetArgument.
+ // 3) An old CPS phi in a different block.
//
- // - MovHint has KillLocal prepended to it.
- //
- // - GetLocal die and get replaced with references to the node specified by
- // valueForOperand.
- //
- // - SetLocal turns into PutStack if it's flushed, or turns into a Check otherwise.
- //
- // - Flush loses its children and turns into a Phantom.
- //
- // - PhantomLocal becomes Phantom, and its child is whatever is specified by
- // valueForOperand.
- //
- // - SetArgument is removed. Note that GetStack nodes have already been inserted.
- Operands<Node*> valueForOperand(OperandsLike, m_graph.block(0)->variablesAtHead);
- for (BasicBlock* block : m_graph.blocksInPreOrder()) {
- valueForOperand.clear();
-
- // CPS will claim that the root block has all arguments live. But we have already done
- // the first step of SSA conversion: argument locals are no longer live at head;
- // instead we have GetStack nodes for extracting the values of arguments. So, we
- // skip the at-head available value calculation for the root block.
- if (block != m_graph.block(0)) {
- for (size_t i = valueForOperand.size(); i--;) {
- Node* nodeAtHead = block->variablesAtHead[i];
- if (!nodeAtHead)
- continue;
-
- VariableAccessData* variable = nodeAtHead->variableAccessData();
-
- if (verbose)
- dataLog("Considering live variable ", VariableAccessDataDump(m_graph, variable), " at head of block ", *block, "\n");
-
- SSACalculator::Variable* ssaVariable = m_ssaVariableForVariable.get(variable);
- SSACalculator::Def* def = m_calculator.reachingDefAtHead(block, ssaVariable);
- if (!def) {
- // If we are required to insert a Phi, then we won't have a reaching def
- // at head.
- continue;
- }
-
- Node* node = def->value();
- if (node->replacement()) {
- // This will occur when a SetLocal had a GetLocal as its source. The
- // GetLocal would get replaced with an actual SSA value by the time we get
- // here. Note that the SSA value with which the GetLocal got replaced
- // would not in turn have a replacement.
- node = node->replacement();
- ASSERT(!node->replacement());
- }
- if (verbose)
- dataLog("Mapping: ", VirtualRegister(valueForOperand.operandForIndex(i)), " -> ", node, "\n");
- valueForOperand[i] = node;
+ // We don't have to do anything for (1) and (2), but we do need to
+ // do a replacement for (3).
+
+ // Clear all replacements, since other phases may have used them.
+ m_graph.clearReplacements();
+
+ // For all of the old CPS Phis, figure out what they correspond to in SSA.
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned phiIndex = block->phis.size(); phiIndex--;) {
+ Node* phi = block->phis[phiIndex];
+ if (verbose) {
+ dataLog(
+ "Considering ", phi, ", for r", phi->local(),
+ ", and its replacement in ", *block, ", ",
+ block->variablesAtHead.operand(phi->local()), "\n");
}
+ phi->misc.replacement = block->variablesAtHead.operand(phi->local());
+ }
+ }
+
+ // Now make sure that all variablesAtHead in each block points to the
+ // canonical SSA value. Prior to this, variablesAtHead[local] may point to
+ // an old CPS Phi in a different block.
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (size_t i = block->variablesAtHead.size(); i--;) {
+ Node* node = block->variablesAtHead[i];
+ if (!node)
+ continue;
+ while (node->misc.replacement)
+ node = node->misc.replacement;
+ block->variablesAtHead[i] = node;
+ }
+ }
+
+ if (verbose) {
+ dataLog("Variables at head after convergence:\n");
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ dataLog(" ", *block, ": ", block->variablesAtHead, "\n");
}
+ }
+
+ // Convert operations over locals into operations over SSA nodes.
+ // - GetLocal over captured variables lose their phis.
+ // - GetLocal over uncaptured variables die and get replaced with references
+ // to the node specified by variablesAtHead.
+ // - SetLocal gets NodeMustGenerate if it's flushed, or turns into a
+ // Check otherwise.
+ // - Flush loses its children but remains, because we want to know when a
+ // flushed SetLocal's value is no longer needed. This also makes it simpler
+ // to reason about the format of a local, since we can just do a backwards
+ // analysis (see FlushLivenessAnalysisPhase). As part of the backwards
+ // analysis, we say that the type of a local can be either int32, double,
+ // value, or dead.
+ // - PhantomLocal becomes Phantom, and its child is whatever is specified
+ // by variablesAtHead.
+ // - SetArgument turns into GetArgument unless it's a captured variable.
+ // - Upsilons get their children fixed to refer to the true value of that local
+ // at the end of the block. Prior to this loop, Upsilons will refer to
+ // variableAtTail[operand], which may be any of Flush, PhantomLocal, GetLocal,
+ // SetLocal, SetArgument, or Phi. We accomplish this by setting the
+ // replacement pointers of all of those nodes to refer to either
+ // variablesAtHead[operand], or the child of the SetLocal.
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
- // Insert Phis by asking the calculator what phis there are in this block. Also update
- // valueForOperand with those Phis. For Phis associated with variables that are not
- // flushed, we also insert a MovHint.
- size_t phiInsertionPoint = 0;
- for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(block)) {
- VariableAccessData* variable = m_variableForSSAIndex[phiDef->variable()->index()];
-
- m_insertionSet.insert(phiInsertionPoint, phiDef->value());
- valueForOperand.operand(variable->local()) = phiDef->value();
-
- m_insertionSet.insertNode(
- phiInsertionPoint, SpecNone, MovHint, NodeOrigin(),
- OpInfo(variable->local().offset()), phiDef->value()->defaultEdge());
+ for (unsigned phiIndex = block->phis.size(); phiIndex--;) {
+ block->phis[phiIndex]->misc.replacement =
+ block->variablesAtHead.operand(block->phis[phiIndex]->local());
}
+ for (unsigned nodeIndex = block->size(); nodeIndex--;)
+ ASSERT(!block->at(nodeIndex)->misc.replacement);
for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
Node* node = block->at(nodeIndex);
- if (verbose) {
- dataLog("Processing node ", node, ":\n");
- m_graph.dump(WTF::dataFile(), " ", node);
- }
-
m_graph.performSubstitution(node);
switch (node->op()) {
- case MovHint: {
- m_insertionSet.insertNode(
- nodeIndex, SpecNone, KillStack, node->origin,
- OpInfo(node->unlinkedLocal().offset()));
- break;
- }
-
case SetLocal: {
VariableAccessData* variable = node->variableAccessData();
- Node* child = node->child1().node();
-
- if (!!(node->flags() & NodeIsFlushed)) {
- node->convertToPutStack(
- m_graph.m_stackAccessData.add(
- variable->local(), variable->flushFormat()));
- } else
- node->remove();
-
- if (verbose)
- dataLog("Mapping: ", variable->local(), " -> ", child, "\n");
- valueForOperand.operand(variable->local()) = child;
- break;
- }
-
- case GetStack: {
- ASSERT(m_argumentGetters.contains(node));
- valueForOperand.operand(node->stackAccessData()->local) = node;
+ if (variable->isCaptured() || m_flushedLocalOps.contains(node))
+ node->mergeFlags(NodeMustGenerate);
+ else
+ node->setOpAndDefaultFlags(Check);
+ node->misc.replacement = node->child1().node(); // Only for Upsilons.
break;
}
case GetLocal: {
- VariableAccessData* variable = node->variableAccessData();
+ // It seems tempting to just do forwardPhi(GetLocal), except that we
+ // could have created a new (SSA) Phi, and the GetLocal could still be
+ // referring to an old (CPS) Phi. Uses variablesAtHead to tell us what
+ // to refer to.
node->children.reset();
-
- node->remove();
- if (verbose)
- dataLog("Replacing node ", node, " with ", valueForOperand.operand(variable->local()), "\n");
- node->setReplacement(valueForOperand.operand(variable->local()));
+ VariableAccessData* variable = node->variableAccessData();
+ if (variable->isCaptured())
+ break;
+ node->convertToPhantom();
+ node->misc.replacement = block->variablesAtHead.operand(variable->local());
break;
}
case Flush: {
node->children.reset();
- node->remove();
+ // This is only for Upsilons. An Upsilon will only refer to a Flush if
+ // there were no SetLocals or GetLocals in the block.
+ node->misc.replacement = block->variablesAtHead.operand(node->local());
break;
}
case PhantomLocal: {
- ASSERT(node->child1().useKind() == UntypedUse);
VariableAccessData* variable = node->variableAccessData();
- node->child1() = valueForOperand.operand(variable->local())->defaultEdge();
- node->remove();
+ if (variable->isCaptured())
+ break;
+ node->child1().setNode(block->variablesAtHead.operand(variable->local()));
+ node->convertToPhantom();
+ // This is only for Upsilons. An Upsilon will only refer to a
+ // PhantomLocal if there were no SetLocals or GetLocals in the block.
+ node->misc.replacement = block->variablesAtHead.operand(variable->local());
break;
}
case SetArgument: {
- node->remove();
+ VariableAccessData* variable = node->variableAccessData();
+ if (variable->isCaptured())
+ break;
+ node->setOpAndDefaultFlags(GetArgument);
+ node->mergeFlags(resultFor(node->variableAccessData()->flushFormat()));
break;
}
-
+
default:
break;
}
}
-
- // We want to insert Upsilons just before the end of the block. On the surface this
- // seems dangerous because the Upsilon will have a checking UseKind. But, we will not
- // actually be performing the check at the point of the Upsilon; the check will
- // already have been performed at the point where the original SetLocal was.
- NodeAndIndex terminal = block->findTerminal();
- size_t upsilonInsertionPoint = terminal.index;
- NodeOrigin upsilonOrigin = terminal.node->origin;
- for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
- BasicBlock* successorBlock = block->successor(successorIndex);
- for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(successorBlock)) {
- Node* phiNode = phiDef->value();
- SSACalculator::Variable* ssaVariable = phiDef->variable();
- VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()];
- FlushFormat format = variable->flushFormat();
- UseKind useKind = useKindFor(format);
-
- m_insertionSet.insertNode(
- upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
- OpInfo(phiNode), Edge(
- valueForOperand.operand(variable->local()),
- useKind));
- }
- }
-
- m_insertionSet.execute(block);
}
// Free all CPS phis and reset variables vectors.
@@ -371,39 +374,95 @@ public:
block->variablesAtTail.clear();
block->valuesAtHead.clear();
block->valuesAtHead.clear();
- block->ssa = std::make_unique<BasicBlock::SSAData>(block);
+ block->ssa = adoptPtr(new BasicBlock::SSAData(block));
}
- m_graph.m_argumentFormats.resize(m_graph.m_arguments.size());
- for (unsigned i = m_graph.m_arguments.size(); i--;) {
- FlushFormat format = FlushedJSValue;
-
- Node* node = m_argumentMapping.get(m_graph.m_arguments[i]);
-
- RELEASE_ASSERT(node);
- format = node->stackAccessData()->format;
-
- m_graph.m_argumentFormats[i] = format;
- m_graph.m_arguments[i] = node; // Record the load that loads the arguments for the benefit of exit profiling.
- }
+ m_graph.m_arguments.clear();
m_graph.m_form = SSA;
+ return true;
+ }
- if (verbose) {
- dataLog("Graph after SSA transformation:\n");
- m_graph.dump();
+private:
+ void forwardPhiChildren(Node* node)
+ {
+ for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
+ Edge& edge = node->children.child(i);
+ if (!edge)
+ break;
+ m_changed |= forwardPhiEdge(edge);
}
-
+ }
+
+ Node* forwardPhi(Node* node)
+ {
+ for (;;) {
+ switch (node->op()) {
+ case Phi: {
+ Edge edge = node->children.justOneChild();
+ if (!edge)
+ return node;
+ node = edge.node();
+ break;
+ }
+ case GetLocal:
+ case SetLocal:
+ if (node->variableAccessData()->isCaptured())
+ return node;
+ node = node->child1().node();
+ break;
+ default:
+ return node;
+ }
+ }
+ }
+
+ bool forwardPhiEdge(Edge& edge)
+ {
+ Node* newNode = forwardPhi(edge.node());
+ if (newNode == edge.node())
+ return false;
+ edge.setNode(newNode);
return true;
}
+
+ void deduplicateChildren(Node* node)
+ {
+ for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
+ Edge edge = node->children.child(i);
+ if (!edge)
+ break;
+ if (edge == node) {
+ node->children.removeEdge(i--);
+ m_changed = true;
+ continue;
+ }
+ for (unsigned j = i + 1; j < AdjacencyList::Size; ++j) {
+ if (node->children.child(j) == edge) {
+ node->children.removeEdge(j--);
+ m_changed = true;
+ }
+ }
+ }
+ }
+
+ void addFlushedLocalOp(Node* node)
+ {
+ if (m_flushedLocalOps.contains(node))
+ return;
+ m_flushedLocalOps.add(node);
+ m_flushedLocalOpWorklist.append(node);
+ }
-private:
- SSACalculator m_calculator;
+ void addFlushedLocalEdge(Node*, Edge edge)
+ {
+ addFlushedLocalOp(edge.node());
+ }
+
InsertionSet m_insertionSet;
- HashMap<VariableAccessData*, SSACalculator::Variable*> m_ssaVariableForVariable;
- HashMap<Node*, Node*> m_argumentMapping;
- HashSet<Node*> m_argumentGetters;
- Vector<VariableAccessData*> m_variableForSSAIndex;
+ HashSet<Node*> m_flushedLocalOps;
+ Vector<Node*> m_flushedLocalOpWorklist;
+ bool m_changed;
};
bool performSSAConversion(Graph& graph)
diff --git a/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h b/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h
index 86c999d70..2fa5ff41a 100644
--- a/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGSSAConversionPhase_h
#define DFGSSAConversionPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
@@ -34,9 +36,10 @@ class Graph;
// Convert ThreadedCPS form into SSA form. This results in a form that has:
//
-// - Minimal Phi's. We use the the Cytron et al (TOPLAS'91) algorithm for
-// Phi insertion. Most of the algorithm is implemented in SSACalculator
-// and Dominators.
+// - Roughly minimal Phi's. We use the Aycock & Horspool fixpoint for
+// converting the CPS maximal Phis into SSA minimal Phis, with the caveat
+// that irreducible control flow may result in some missed opportunities
+// for Phi reduction.
//
// - No uses of GetLocal/SetLocal except for captured variables and flushes.
// After this, any remaining SetLocal means Flush. PhantomLocals become
diff --git a/Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp b/Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp
index c4b67a361..51d5fd0e4 100644
--- a/Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +32,7 @@
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -69,49 +69,36 @@ private:
{
switch (m_node->op()) {
case GetByVal:
- case HasIndexedProperty:
lowerBoundsCheck(m_node->child1(), m_node->child2(), m_node->child3());
break;
case PutByVal:
- case PutByValDirect: {
- Edge base = m_graph.varArgChild(m_node, 0);
- Edge index = m_graph.varArgChild(m_node, 1);
- Edge storage = m_graph.varArgChild(m_node, 3);
- if (lowerBoundsCheck(base, index, storage))
- break;
-
- if (m_node->arrayMode().typedArrayType() != NotTypedArray && m_node->arrayMode().isOutOfBounds()) {
- Node* length = m_insertionSet.insertNode(
- m_nodeIndex, SpecInt32, GetArrayLength, m_node->origin,
- OpInfo(m_node->arrayMode().asWord()), base, storage);
-
- m_graph.varArgChild(m_node, 4) = Edge(length, KnownInt32Use);
- break;
- }
+ case PutByValDirect:
+ lowerBoundsCheck(
+ m_graph.varArgChild(m_node, 0),
+ m_graph.varArgChild(m_node, 1),
+ m_graph.varArgChild(m_node, 3));
break;
- }
default:
break;
}
}
- bool lowerBoundsCheck(Edge base, Edge index, Edge storage)
+ void lowerBoundsCheck(Edge base, Edge index, Edge storage)
{
if (!m_node->arrayMode().permitsBoundsCheckLowering())
- return false;
+ return;
if (!m_node->arrayMode().lengthNeedsStorage())
storage = Edge();
Node* length = m_insertionSet.insertNode(
- m_nodeIndex, SpecInt32, GetArrayLength, m_node->origin,
+ m_nodeIndex, SpecInt32, GetArrayLength, m_node->codeOrigin,
OpInfo(m_node->arrayMode().asWord()), base, storage);
m_insertionSet.insertNode(
- m_nodeIndex, SpecInt32, CheckInBounds, m_node->origin,
+ m_nodeIndex, SpecInt32, CheckInBounds, m_node->codeOrigin,
index, Edge(length, KnownInt32Use));
- return true;
}
InsertionSet m_insertionSet;
diff --git a/Source/JavaScriptCore/dfg/DFGSafeToExecute.h b/Source/JavaScriptCore/dfg/DFGSafeToExecute.h
index 9d7bbd8e2..b6cd5dc08 100644
--- a/Source/JavaScriptCore/dfg/DFGSafeToExecute.h
+++ b/Source/JavaScriptCore/dfg/DFGSafeToExecute.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGSafeToExecute_h
#define DFGSafeToExecute_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
@@ -46,37 +48,29 @@ public:
switch (edge.useKind()) {
case UntypedUse:
case Int32Use:
- case DoubleRepUse:
- case DoubleRepRealUse:
- case Int52RepUse:
- case NumberUse:
case RealNumberUse:
+ case NumberUse:
case BooleanUse:
case CellUse:
case ObjectUse:
- case FunctionUse:
case FinalObjectUse:
case ObjectOrOtherUse:
case StringIdentUse:
case StringUse:
- case SymbolUse:
case StringObjectUse:
case StringOrStringObjectUse:
- case NotStringVarUse:
case NotCellUse:
case OtherUse:
- case MiscUse:
case MachineIntUse:
- case DoubleRepMachineIntUse:
return;
case KnownInt32Use:
if (m_state.forNode(edge).m_type & ~SpecInt32)
m_result = false;
return;
-
- case KnownBooleanUse:
- if (m_state.forNode(edge).m_type & ~SpecBoolean)
+
+ case KnownNumberUse:
+ if (m_state.forNode(edge).m_type & ~SpecFullNumber)
m_result = false;
return;
@@ -105,15 +99,8 @@ private:
// Determines if it's safe to execute a node within the given abstract state. This may
// return false conservatively. If it returns true, then you can hoist the given node
-// up to the given point and expect that it will not crash. It also guarantees that the
-// node will not produce a malformed JSValue or object pointer when executed in the
-// given state. But this doesn't guarantee that the node will produce the result you
-// wanted. For example, you may have a GetByOffset from a prototype that only makes
-// semantic sense if you've also checked that some nearer prototype doesn't also have
-// a property of the same name. This could still return true even if that check hadn't
-// been performed in the given abstract state. That's fine though: the load can still
-// safely execute before that check, so long as that check continues to guard any
-// user-observable things done to the loaded value.
+// up to the given point and expect that it will not crash. This doesn't guarantee that
+// the node will produce the result you wanted other than not crashing.
template<typename AbstractStateType>
bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
{
@@ -124,20 +111,16 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
switch (node->op()) {
case JSConstant:
- case DoubleConstant:
- case Int52Constant:
+ case WeakJSConstant:
case Identity:
case ToThis:
case CreateThis:
case GetCallee:
- case GetArgumentCount:
case GetLocal:
case SetLocal:
- case PutStack:
- case KillStack:
- case GetStack:
case MovHint:
case ZombieHint:
+ case GetArgument:
case Phantom:
case Upsilon:
case Phi:
@@ -153,9 +136,9 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
case BitURShift:
case ValueToInt32:
case UInt32ToNumber:
+ case Int32ToDouble:
case DoubleAsInt32:
case ArithAdd:
- case ArithClz32:
case ArithSub:
case ArithNegate:
case ArithMul:
@@ -165,36 +148,33 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
case ArithAbs:
case ArithMin:
case ArithMax:
- case ArithPow:
case ArithSqrt:
- case ArithFRound:
- case ArithRound:
case ArithSin:
case ArithCos:
- case ArithLog:
case ValueAdd:
case GetById:
case GetByIdFlush:
case PutById:
- case PutByIdFlush:
case PutByIdDirect:
case CheckStructure:
- case GetExecutable:
+ case CheckExecutable:
case GetButterfly:
case CheckArray:
case Arrayify:
case ArrayifyToStructure:
case GetScope:
+ case GetMyScope:
+ case SkipTopScope:
case SkipScope:
+ case GetClosureRegisters:
case GetClosureVar:
case PutClosureVar:
case GetGlobalVar:
case PutGlobalVar:
+ case VariableWatchpoint:
case VarInjectionWatchpoint:
- case CheckCell:
- case CheckBadCell:
- case CheckNotEmpty:
- case CheckIdent:
+ case CheckFunction:
+ case AllocationProfileWatchpoint:
case RegExpExec:
case RegExpTest:
case CompareLess:
@@ -204,13 +184,9 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
case CompareEq:
case CompareEqConstant:
case CompareStrictEq:
+ case CompareStrictEqConstant:
case Call:
case Construct:
- case CallVarargs:
- case ConstructVarargs:
- case LoadVarargs:
- case CallForwardVarargs:
- case ConstructForwardVarargs:
case NewObject:
case NewArray:
case NewArrayWithSize:
@@ -219,8 +195,6 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
case Breakpoint:
case ProfileWillCall:
case ProfileDidCall:
- case ProfileType:
- case ProfileControlFlow:
case CheckHasInstance:
case InstanceOf:
case IsUndefined:
@@ -228,23 +202,27 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
case IsNumber:
case IsString:
case IsObject:
- case IsObjectOrNull:
case IsFunction:
case TypeOf:
case LogicalNot:
case ToPrimitive:
case ToString:
- case CallStringConstructor:
case NewStringObject:
case MakeRope:
case In:
case CreateActivation:
- case CreateDirectArguments:
- case CreateScopedArguments:
- case CreateClonedArguments:
- case GetFromArguments:
- case PutToArguments:
+ case TearOffActivation:
+ case CreateArguments:
+ case PhantomArguments:
+ case TearOffArguments:
+ case GetMyArgumentsLength:
+ case GetMyArgumentByVal:
+ case GetMyArgumentsLengthSafe:
+ case GetMyArgumentByValSafe:
+ case CheckArgumentsNotCreated:
+ case NewFunctionNoCheck:
case NewFunction:
+ case NewFunctionExpression:
case Jump:
case Branch:
case Switch:
@@ -261,49 +239,21 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
case CheckTierUpInLoop:
case CheckTierUpAtReturn:
case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter:
case LoopHint:
+ case Int52ToDouble:
+ case Int52ToValue:
case StoreBarrier:
+ case ConditionalStoreBarrier:
+ case StoreBarrierWithNullCheck:
case InvalidationPoint:
case NotifyWrite:
+ case FunctionReentryWatchpoint:
+ case TypedArrayWatchpoint:
case CheckInBounds:
case ConstantStoragePointer:
case Check:
- case MultiPutByOffset:
- case ValueRep:
- case DoubleRep:
- case Int52Rep:
- case BooleanToNumber:
- case FiatInt52:
- case GetGetter:
- case GetSetter:
- case GetEnumerableLength:
- case HasGenericProperty:
- case HasStructureProperty:
- case HasIndexedProperty:
- case GetDirectPname:
- case GetPropertyEnumerator:
- case GetEnumeratorStructurePname:
- case GetEnumeratorGenericPname:
- case ToIndexString:
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case PutHint:
- case CheckStructureImmediate:
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- case GetMyArgumentByVal:
- case ForwardVarargs:
return true;
-
- case BottomValue:
- // If in doubt, assume that this isn't safe to execute, just because we have no way of
- // compiling this node.
- return false;
-
+
case GetByVal:
case GetIndexedPropertyStorage:
case GetArrayLength:
@@ -322,62 +272,22 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
return node->arrayMode().modeForPut().alreadyChecked(
graph, node, state.forNode(graph.varArgChild(node, 0)));
+ case StructureTransitionWatchpoint:
+ return state.forNode(node->child1()).m_futurePossibleStructure.isSubsetOf(
+ StructureSet(node->structure()));
+
case PutStructure:
+ case PhantomPutStructure:
case AllocatePropertyStorage:
case ReallocatePropertyStorage:
- return state.forNode(node->child1()).m_structure.isSubsetOf(
- StructureSet(node->transition()->previous));
+ return state.forNode(node->child1()).m_currentKnownStructure.isSubsetOf(
+ StructureSet(node->structureTransitionData().previousStructure));
case GetByOffset:
- case GetGetterSetterByOffset:
- case PutByOffset: {
- PropertyOffset offset = node->storageAccessData().offset;
-
- if (state.structureClobberState() == StructuresAreWatched) {
- if (JSObject* knownBase = node->child1()->dynamicCastConstant<JSObject*>()) {
- if (graph.isSafeToLoad(knownBase, offset))
- return true;
- }
- }
+ case PutByOffset:
+ return state.forNode(node->child1()).m_currentKnownStructure.isValidOffset(
+ graph.m_storageAccessData[node->storageAccessDataIndex()].offset);
- StructureAbstractValue& value = state.forNode(node->child1()).m_structure;
- if (value.isInfinite())
- return false;
- for (unsigned i = value.size(); i--;) {
- if (!value[i]->isValidOffset(offset))
- return false;
- }
- return true;
- }
-
- case MultiGetByOffset: {
- // We can't always guarantee that the MultiGetByOffset is safe to execute if it
- // contains loads from prototypes. If the load requires a check in IR, which is rare, then
- // we currently claim that we don't know if it's safe to execute because finding that
- // check in the abstract state would be hard. If the load requires watchpoints, we just
- // check if we're not in a clobbered state (i.e. in between a side effect and an
- // invalidation point).
- for (const MultiGetByOffsetCase& getCase : node->multiGetByOffsetData().cases) {
- GetByOffsetMethod method = getCase.method();
- switch (method.kind()) {
- case GetByOffsetMethod::Invalid:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- case GetByOffsetMethod::Constant: // OK because constants are always safe to execute.
- case GetByOffsetMethod::Load: // OK because the MultiGetByOffset has its own checks for loading from self.
- break;
- case GetByOffsetMethod::LoadFromPrototype:
- // Only OK if the state isn't clobbered. That's almost always the case.
- if (state.structureClobberState() != StructuresAreWatched)
- return false;
- if (!graph.isSafeToLoad(method.prototype()->cast<JSObject*>(), method.offset()))
- return false;
- break;
- }
- }
- return true;
- }
-
case LastNodeType:
RELEASE_ASSERT_NOT_REACHED();
return false;
diff --git a/Source/JavaScriptCore/dfg/DFGSafepoint.cpp b/Source/JavaScriptCore/dfg/DFGSafepoint.cpp
deleted file mode 100644
index 11ba5ad9b..000000000
--- a/Source/JavaScriptCore/dfg/DFGSafepoint.cpp
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGSafepoint.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGPlan.h"
-#include "DFGScannable.h"
-#include "DFGThreadData.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-Safepoint::Result::~Result()
-{
- RELEASE_ASSERT(m_wasChecked);
-}
-
-bool Safepoint::Result::didGetCancelled()
-{
- m_wasChecked = true;
- return m_didGetCancelled;
-}
-
-Safepoint::Safepoint(Plan& plan, Result& result)
- : m_plan(plan)
- , m_didCallBegin(false)
- , m_result(result)
-{
- RELEASE_ASSERT(result.m_wasChecked);
- result.m_wasChecked = false;
- result.m_didGetCancelled = false;
-}
-
-Safepoint::~Safepoint()
-{
- RELEASE_ASSERT(m_didCallBegin);
- if (ThreadData* data = m_plan.threadData) {
- RELEASE_ASSERT(data->m_safepoint == this);
- data->m_rightToRun.lock();
- data->m_safepoint = nullptr;
- }
-}
-
-void Safepoint::add(Scannable* scannable)
-{
- RELEASE_ASSERT(!m_didCallBegin);
- m_scannables.append(scannable);
-}
-
-void Safepoint::begin()
-{
- RELEASE_ASSERT(!m_didCallBegin);
- m_didCallBegin = true;
- if (ThreadData* data = m_plan.threadData) {
- RELEASE_ASSERT(!data->m_safepoint);
- data->m_safepoint = this;
- data->m_rightToRun.unlock();
- }
-}
-
-void Safepoint::checkLivenessAndVisitChildren(SlotVisitor& visitor)
-{
- RELEASE_ASSERT(m_didCallBegin);
-
- if (m_result.m_didGetCancelled)
- return; // We were cancelled during a previous GC!
-
- if (!isKnownToBeLiveDuringGC())
- return;
-
- for (unsigned i = m_scannables.size(); i--;)
- m_scannables[i]->visitChildren(visitor);
-}
-
-bool Safepoint::isKnownToBeLiveDuringGC()
-{
- RELEASE_ASSERT(m_didCallBegin);
-
- if (m_result.m_didGetCancelled)
- return true; // We were cancelled during a previous GC, so let's not mess with it this time around - pretend it's live and move on.
-
- return m_plan.isKnownToBeLiveDuringGC();
-}
-
-void Safepoint::cancel()
-{
- RELEASE_ASSERT(m_didCallBegin);
- RELEASE_ASSERT(!m_result.m_didGetCancelled); // We cannot get cancelled twice because subsequent GCs will think that we're alive and they will not do anything to us.
-
- m_plan.cancel();
- m_result.m_didGetCancelled = true;
-}
-
-VM& Safepoint::vm() const
-{
- return m_plan.vm;
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGSafepoint.h b/Source/JavaScriptCore/dfg/DFGSafepoint.h
deleted file mode 100644
index 96f4b8ecd..000000000
--- a/Source/JavaScriptCore/dfg/DFGSafepoint.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGSafepoint_h
-#define DFGSafepoint_h
-
-#if ENABLE(DFG_JIT)
-
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class SlotVisitor;
-class VM;
-
-namespace DFG {
-
-class Scannable;
-struct Plan;
-
-class Safepoint {
-public:
- class Result {
- public:
- Result()
- : m_didGetCancelled(false)
- , m_wasChecked(true)
- {
- }
-
- ~Result();
-
- bool didGetCancelled();
-
- private:
- friend class Safepoint;
-
- bool m_didGetCancelled;
- bool m_wasChecked;
- };
-
- Safepoint(Plan&, Result&);
- ~Safepoint();
-
- void add(Scannable*);
-
- void begin();
-
- void checkLivenessAndVisitChildren(SlotVisitor&);
- bool isKnownToBeLiveDuringGC();
- void cancel();
-
- VM& vm() const;
-
-private:
- Plan& m_plan;
- Vector<Scannable*> m_scannables;
- bool m_didCallBegin;
- Result& m_result;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGSafepoint_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h
index 4c95bccfe..b9198472b 100644
--- a/Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h
+++ b/Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h
@@ -26,6 +26,8 @@
#ifndef DFGSaneStringGetByValSlowPathGenerator_h
#define DFGSaneStringGetByValSlowPathGenerator_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
diff --git a/Source/JavaScriptCore/dfg/DFGScannable.h b/Source/JavaScriptCore/dfg/DFGScannable.h
deleted file mode 100644
index 6b85cc024..000000000
--- a/Source/JavaScriptCore/dfg/DFGScannable.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGScannable_h
-#define DFGScannable_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC {
-
-class SlotVisitor;
-
-namespace DFG {
-
-class Scannable {
-public:
- Scannable() { }
- virtual ~Scannable() { }
-
- virtual void visitChildren(SlotVisitor&) = 0;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGScannable_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGScoreBoard.h b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
index c8795c8a4..15af609a9 100644
--- a/Source/JavaScriptCore/dfg/DFGScoreBoard.h
+++ b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
@@ -55,27 +55,21 @@ public:
assertClear();
}
- void sortFree()
- {
- std::sort(m_free.begin(), m_free.end());
- }
-
void assertClear()
{
- if (ASSERT_DISABLED)
- return;
-
+#if !ASSERT_DISABLED
// For every entry in the used list the use count of the virtual register should be zero, or max, due to it being a preserved local.
for (size_t i = 0; i < m_used.size(); ++i)
- RELEASE_ASSERT(!m_used[i] || m_used[i] == max());
+ ASSERT(!m_used[i] || m_used[i] == max());
// For every entry in the free list, the use count should be zero.
for (size_t i = 0; i < m_free.size(); ++i)
- RELEASE_ASSERT(!m_used[m_free[i]]);
+ ASSERT(!m_used[m_free[i]]);
// There must not be duplicates in the free list.
for (size_t i = 0; i < m_free.size(); ++i) {
for (size_t j = i + 1; j < m_free.size(); ++j)
- RELEASE_ASSERT(m_free[i] != m_free[j]);
+ ASSERT(m_free[i] != m_free[j]);
}
+#endif
}
VirtualRegister allocate()
diff --git a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
index 8de98d88d..31945cea0 100644
--- a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
+++ b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
@@ -26,6 +26,8 @@
#ifndef DFGSilentRegisterSavePlan_h
#define DFGSilentRegisterSavePlan_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
diff --git a/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h
index add1a23f2..34d3631ea 100644
--- a/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h
+++ b/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h
@@ -26,12 +26,15 @@
#ifndef DFGSlowPathGenerator_h
#define DFGSlowPathGenerator_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
#include "DFGSilentRegisterSavePlan.h"
#include "DFGSpeculativeJIT.h"
#include <wtf/FastMalloc.h>
+#include <wtf/PassOwnPtr.h>
namespace JSC { namespace DFG {
@@ -49,7 +52,7 @@ public:
jit->m_currentNode = m_currentNode;
generateInternal(jit);
if (!ASSERT_DISABLED)
- jit->m_jit.abortWithReason(DFGSlowPathGeneratorFellThrough);
+ jit->m_jit.breakpoint(); // make sure that the generator jumps back to somewhere
}
MacroAssembler::Label label() const { return m_label; }
virtual MacroAssembler::Call call() const
@@ -328,77 +331,94 @@ protected:
};
template<typename JumpType, typename FunctionType, typename ResultType>
-inline std::unique_ptr<SlowPathGenerator> slowPathCall(
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
JumpType from, SpeculativeJIT* jit, FunctionType function,
ResultType result, SpillRegistersMode spillMode = NeedToSpill)
{
- return std::make_unique<CallResultAndNoArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType>>(
- from, jit, function, spillMode, result);
+ return adoptPtr(
+ new CallResultAndNoArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType>(
+ from, jit, function, spillMode, result));
}
template<
typename JumpType, typename FunctionType, typename ResultType,
typename ArgumentType1>
-inline std::unique_ptr<SlowPathGenerator> slowPathCall(
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
JumpType from, SpeculativeJIT* jit, FunctionType function,
ResultType result, ArgumentType1 argument1,
SpillRegistersMode spillMode = NeedToSpill)
{
- return std::make_unique<CallResultAndOneArgumentSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1>>(
- from, jit, function, spillMode, result, argument1);
+ return adoptPtr(
+ new CallResultAndOneArgumentSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1>(
+ from, jit, function, spillMode, result, argument1));
}
template<
typename JumpType, typename FunctionType, typename ResultType,
typename ArgumentType1, typename ArgumentType2>
-inline std::unique_ptr<SlowPathGenerator> slowPathCall(
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
JumpType from, SpeculativeJIT* jit, FunctionType function,
ResultType result, ArgumentType1 argument1, ArgumentType2 argument2,
SpillRegistersMode spillMode = NeedToSpill)
{
- return std::make_unique<CallResultAndTwoArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2>>(
- from, jit, function, spillMode, result, argument1, argument2);
+ return adoptPtr(
+ new CallResultAndTwoArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2>(
+ from, jit, function, spillMode, result, argument1, argument2));
}
template<
typename JumpType, typename FunctionType, typename ResultType,
typename ArgumentType1, typename ArgumentType2, typename ArgumentType3>
-inline std::unique_ptr<SlowPathGenerator> slowPathCall(
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
JumpType from, SpeculativeJIT* jit, FunctionType function,
ResultType result, ArgumentType1 argument1, ArgumentType2 argument2,
ArgumentType3 argument3, SpillRegistersMode spillMode = NeedToSpill)
{
- return std::make_unique<CallResultAndThreeArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
- ArgumentType3>>(from, jit, function, spillMode, result, argument1, argument2, argument3);
+ return adoptPtr(
+ new CallResultAndThreeArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
+ ArgumentType3>(
+ from, jit, function, spillMode, result, argument1, argument2,
+ argument3));
}
template<
typename JumpType, typename FunctionType, typename ResultType,
typename ArgumentType1, typename ArgumentType2, typename ArgumentType3,
typename ArgumentType4>
-inline std::unique_ptr<SlowPathGenerator> slowPathCall(
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
JumpType from, SpeculativeJIT* jit, FunctionType function,
ResultType result, ArgumentType1 argument1, ArgumentType2 argument2,
ArgumentType3 argument3, ArgumentType4 argument4,
SpillRegistersMode spillMode = NeedToSpill)
{
- return std::make_unique<CallResultAndFourArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
- ArgumentType3, ArgumentType4>>(from, jit, function, spillMode, result, argument1, argument2, argument3, argument4);
+ return adoptPtr(
+ new CallResultAndFourArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
+ ArgumentType3, ArgumentType4>(
+ from, jit, function, spillMode, result, argument1, argument2,
+ argument3, argument4));
}
template<
typename JumpType, typename FunctionType, typename ResultType,
typename ArgumentType1, typename ArgumentType2, typename ArgumentType3,
typename ArgumentType4, typename ArgumentType5>
-inline std::unique_ptr<SlowPathGenerator> slowPathCall(
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
JumpType from, SpeculativeJIT* jit, FunctionType function,
ResultType result, ArgumentType1 argument1, ArgumentType2 argument2,
ArgumentType3 argument3, ArgumentType4 argument4, ArgumentType5 argument5,
SpillRegistersMode spillMode = NeedToSpill)
{
- return std::make_unique<CallResultAndFiveArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
- ArgumentType3, ArgumentType4, ArgumentType5>>(from, jit, function, spillMode, result, argument1, argument2, argument3,
- argument4, argument5);
+ return adoptPtr(
+ new CallResultAndFiveArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
+ ArgumentType3, ArgumentType4, ArgumentType5>(
+ from, jit, function, spillMode, result, argument1, argument2,
+ argument3, argument4, argument5));
}
template<typename JumpType, typename DestinationType, typename SourceType, unsigned numberOfAssignments>
@@ -431,31 +451,37 @@ private:
};
template<typename JumpType, typename DestinationType, typename SourceType, unsigned numberOfAssignments>
-inline std::unique_ptr<SlowPathGenerator> slowPathMove(
+inline PassOwnPtr<SlowPathGenerator> slowPathMove(
JumpType from, SpeculativeJIT* jit, SourceType source[numberOfAssignments], DestinationType destination[numberOfAssignments])
{
- return std::make_unique<AssigningSlowPathGenerator<JumpType, DestinationType, SourceType, numberOfAssignments>>(
- from, jit, destination, source);
+ return adoptPtr(
+ new AssigningSlowPathGenerator<
+ JumpType, DestinationType, SourceType, numberOfAssignments>(
+ from, jit, destination, source));
}
template<typename JumpType, typename DestinationType, typename SourceType>
-inline std::unique_ptr<SlowPathGenerator> slowPathMove(
+inline PassOwnPtr<SlowPathGenerator> slowPathMove(
JumpType from, SpeculativeJIT* jit, SourceType source, DestinationType destination)
{
SourceType sourceArray[1] = { source };
DestinationType destinationArray[1] = { destination };
- return std::make_unique<AssigningSlowPathGenerator<JumpType, DestinationType, SourceType, 1>>(
- from, jit, destinationArray, sourceArray);
+ return adoptPtr(
+ new AssigningSlowPathGenerator<
+ JumpType, DestinationType, SourceType, 1>(
+ from, jit, destinationArray, sourceArray));
}
template<typename JumpType, typename DestinationType, typename SourceType>
-inline std::unique_ptr<SlowPathGenerator> slowPathMove(
+inline PassOwnPtr<SlowPathGenerator> slowPathMove(
JumpType from, SpeculativeJIT* jit, SourceType source1, DestinationType destination1, SourceType source2, DestinationType destination2)
{
SourceType sourceArray[2] = { source1, source2 };
DestinationType destinationArray[2] = { destination1, destination2 };
- return std::make_unique<AssigningSlowPathGenerator<JumpType, DestinationType, SourceType, 2>>(
- from, jit, destinationArray, sourceArray);
+ return adoptPtr(
+ new AssigningSlowPathGenerator<
+ JumpType, DestinationType, SourceType, 2>(
+ from, jit, destinationArray, sourceArray));
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 0d9bdeb7a..ee64f721f 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,21 +28,15 @@
#if ENABLE(DFG_JIT)
-#include "BinarySwitch.h"
+#include "Arguments.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
+#include "DFGBinarySwitch.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
-#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
-#include "DFGMayExit.h"
-#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
-#include "DirectArguments.h"
-#include "JSCInlines.h"
-#include "JSEnvironmentRecord.h"
-#include "JSLexicalEnvironment.h"
+#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
-#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "WriteBarrierBuffer.h"
#include <wtf/MathExtras.h>
@@ -53,7 +47,6 @@ SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
: m_compileOkay(true)
, m_jit(jit)
, m_currentNode(0)
- , m_lastGeneratedNode(LastNodeType)
, m_indexInBlock(0)
, m_generationInfo(m_jit.graph().frameRegisterCount())
, m_state(m_jit.graph())
@@ -91,12 +84,12 @@ void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure,
if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
- m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
+ m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
for (unsigned i = numElements; i < vectorLength; ++i)
m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
EncodedValueDescriptor value;
- value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
+ value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
for (unsigned i = numElements; i < vectorLength; ++i) {
m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
@@ -107,88 +100,10 @@ void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure,
// I want a slow path that also loads out the storage pointer, and that's
// what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
// of work for a very small piece of functionality. :-/
- addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
- slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
- structure, numElements));
-}
-
-void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
-{
- if (inlineCallFrame && !inlineCallFrame->isVarargs())
- m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
- else {
- VirtualRegister argumentCountRegister;
- if (!inlineCallFrame)
- argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
- else
- argumentCountRegister = inlineCallFrame->argumentCountRegister;
- m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
- if (!includeThis)
- m_jit.sub32(TrustedImm32(1), lengthGPR);
- }
-}
-
-void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
-{
- emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
-}
-
-void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
-{
- if (origin.inlineCallFrame) {
- if (origin.inlineCallFrame->isClosureCall) {
- m_jit.loadPtr(
- JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
- calleeGPR);
- } else {
- m_jit.move(
- TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
- calleeGPR);
- }
- } else
- m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
-}
-
-void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
-{
- m_jit.addPtr(
- TrustedImm32(
- JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
- GPRInfo::callFrameRegister, startGPR);
-}
-
-MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
-{
- if (!doOSRExitFuzzing())
- return MacroAssembler::Jump();
-
- MacroAssembler::Jump result;
-
- m_jit.pushToSave(GPRInfo::regT0);
- m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
- m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
- m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
- unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
- unsigned at = Options::fireOSRExitFuzzAt();
- if (at || atOrAfter) {
- unsigned threshold;
- MacroAssembler::RelationalCondition condition;
- if (atOrAfter) {
- threshold = atOrAfter;
- condition = MacroAssembler::Below;
- } else {
- threshold = at;
- condition = MacroAssembler::NotEqual;
- }
- MacroAssembler::Jump ok = m_jit.branch32(
- condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
- m_jit.popToRestore(GPRInfo::regT0);
- result = m_jit.jump();
- ok.link(&m_jit);
- }
- m_jit.popToRestore(GPRInfo::regT0);
-
- return result;
+ addSlowPathGenerator(adoptPtr(
+ new CallArrayAllocatorSlowPathGenerator(
+ slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
+ structure, numElements)));
}
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
@@ -196,14 +111,7 @@ void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource
if (!m_compileOkay)
return;
ASSERT(m_isCheckingArgumentTypes || m_canExit);
- JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
- if (fuzzJump.isSet()) {
- JITCompiler::JumpList jumpsToFail;
- jumpsToFail.append(fuzzJump);
- jumpsToFail.append(jumpToFail);
- m_jit.appendExitInfo(jumpsToFail);
- } else
- m_jit.appendExitInfo(jumpToFail);
+ m_jit.appendExitInfo(jumpToFail);
m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
@@ -212,14 +120,7 @@ void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource
if (!m_compileOkay)
return;
ASSERT(m_isCheckingArgumentTypes || m_canExit);
- JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
- if (fuzzJump.isSet()) {
- JITCompiler::JumpList myJumpsToFail;
- myJumpsToFail.append(jumpsToFail);
- myJumpsToFail.append(fuzzJump);
- m_jit.appendExitInfo(myJumpsToFail);
- } else
- m_jit.appendExitInfo(jumpsToFail);
+ m_jit.appendExitInfo(jumpsToFail);
m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
@@ -290,8 +191,6 @@ void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs js
return;
speculationCheck(kind, jsValueRegs, node, m_jit.jump());
m_compileOkay = false;
- if (verboseCompilationEnabled())
- dataLog("Bailing compilation.\n");
}
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
@@ -327,9 +226,9 @@ RegisterSet SpeculativeJIT::usedRegisters()
return result;
}
-void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
+void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
- m_slowPathGenerators.append(WTF::move(slowPathGenerator));
+ m_slowPathGenerators.append(slowPathGenerator);
}
void SpeculativeJIT::runSlowPathGenerators()
@@ -398,20 +297,18 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil
ASSERT(info.gpr() == source);
ASSERT(isJSInt32(info.registerFormat()));
if (node->hasConstant()) {
- ASSERT(node->isInt32Constant());
+ ASSERT(isInt32Constant(node));
fillAction = SetInt32Constant;
} else
fillAction = Load32Payload;
} else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
RELEASE_ASSERT_NOT_REACHED();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
fillAction = DoNothingForFill;
-#endif
#elif USE(JSVALUE32_64)
ASSERT(info.gpr() == source);
if (node->hasConstant()) {
- ASSERT(node->isBooleanConstant());
+ ASSERT(isBooleanConstant(node));
fillAction = SetBooleanConstant;
} else
fillAction = Load32Payload;
@@ -419,8 +316,8 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil
} else if (registerFormat == DataFormatCell) {
ASSERT(info.gpr() == source);
if (node->hasConstant()) {
- DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
- node->asCell(); // To get the assertion.
+ JSValue value = valueOfJSConstant(node);
+ ASSERT_UNUSED(value, value.isCell());
fillAction = SetCellConstant;
} else {
#if USE(JSVALUE64)
@@ -435,6 +332,8 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil
} else if (registerFormat == DataFormatInt52) {
if (node->hasConstant())
fillAction = SetInt52Constant;
+ else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
+ fillAction = Load32PayloadConvertToInt52;
else if (info.spillFormat() == DataFormatInt52)
fillAction = Load64;
else if (info.spillFormat() == DataFormatStrictInt52)
@@ -442,14 +341,17 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil
else if (info.spillFormat() == DataFormatNone)
fillAction = Load64;
else {
+ // Should never happen. Anything that qualifies as an int32 will never
+ // be turned into a cell (immediate spec fail) or a double (to-double
+ // conversions involve a separate node).
RELEASE_ASSERT_NOT_REACHED();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
fillAction = Load64; // Make GCC happy.
-#endif
}
} else if (registerFormat == DataFormatStrictInt52) {
if (node->hasConstant())
fillAction = SetStrictInt52Constant;
+ else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
+ fillAction = Load32PayloadSignExtend;
else if (info.spillFormat() == DataFormatInt52)
fillAction = Load64ShiftInt52Right;
else if (info.spillFormat() == DataFormatStrictInt52)
@@ -457,23 +359,26 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil
else if (info.spillFormat() == DataFormatNone)
fillAction = Load64;
else {
+ // Should never happen. Anything that qualifies as an int32 will never
+ // be turned into a cell (immediate spec fail) or a double (to-double
+ // conversions involve a separate node).
RELEASE_ASSERT_NOT_REACHED();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
fillAction = Load64; // Make GCC happy.
-#endif
}
} else {
ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
ASSERT(info.gpr() == source);
if (node->hasConstant()) {
- if (node->isCellConstant())
+ if (valueOfJSConstant(node).isCell())
fillAction = SetTrustedJSConstant;
- else
fillAction = SetJSConstant;
} else if (info.spillFormat() == DataFormatInt32) {
ASSERT(registerFormat == DataFormatJSInt32);
fillAction = Load32PayloadBoxInt;
+ } else if (info.spillFormat() == DataFormatDouble) {
+ ASSERT(registerFormat == DataFormatJSDouble);
+ fillAction = LoadDoubleBoxDouble;
} else
fillAction = Load64;
#else
@@ -527,16 +432,18 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spil
#if USE(JSVALUE64)
if (node->hasConstant()) {
- node->asNumber(); // To get the assertion.
+ ASSERT(isNumberConstant(node));
fillAction = SetDoubleConstant;
- } else {
- ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
+ } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
+ // it was already spilled previously and not as a double, which means we need unboxing.
+ ASSERT(info.spillFormat() & DataFormatJS);
+ fillAction = LoadJSUnboxDouble;
+ } else
fillAction = LoadDouble;
- }
#elif USE(JSVALUE32_64)
- ASSERT(info.registerFormat() == DataFormatDouble);
+ ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
if (node->hasConstant()) {
- node->asNumber(); // To get the assertion.
+ ASSERT(isNumberConstant(node));
fillAction = SetDoubleConstant;
} else
fillAction = LoadDouble;
@@ -581,21 +488,21 @@ void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTr
case DoNothingForFill:
break;
case SetInt32Constant:
- m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
+ m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
break;
#if USE(JSVALUE64)
case SetInt52Constant:
- m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
+ m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
break;
case SetStrictInt52Constant:
- m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
+ m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt()), plan.gpr());
break;
#endif // USE(JSVALUE64)
case SetBooleanConstant:
- m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
+ m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
break;
case SetCellConstant:
- m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
+ m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
break;
#if USE(JSVALUE64)
case SetTrustedJSConstant:
@@ -605,7 +512,7 @@ void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTr
m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
break;
case SetDoubleConstant:
- m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
+ m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
m_jit.move64ToDouble(canTrample, plan.fpr());
break;
case Load32PayloadBoxInt:
@@ -621,12 +528,20 @@ void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTr
m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
break;
+ case LoadDoubleBoxDouble:
+ m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ break;
+ case LoadJSUnboxDouble:
+ m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
+ unboxDouble(canTrample, plan.fpr());
+ break;
#else
case SetJSConstantTag:
- m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
+ m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
break;
case SetJSConstantPayload:
- m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
+ m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
break;
case SetInt32Tag:
m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
@@ -638,7 +553,7 @@ void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTr
m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
break;
case SetDoubleConstant:
- m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
+ m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
break;
#endif
case Load32Tag:
@@ -676,10 +591,8 @@ JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, A
switch (arrayMode.arrayClass()) {
case Array::OriginalArray: {
CRASH();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
return result;
-#endif
}
case Array::Array:
@@ -716,9 +629,6 @@ JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGP
case Array::Contiguous:
return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
- case Array::Undecided:
- return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
-
case Array::ArrayStorage:
case Array::SlowPutArrayStorage: {
ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
@@ -784,12 +694,13 @@ void SpeculativeJIT::checkArray(Node* node)
case Array::Int32:
case Array::Double:
case Array::Contiguous:
- case Array::Undecided:
case Array::ArrayStorage:
case Array::SlowPutArrayStorage: {
GPRTemporary temp(this);
GPRReg tempGPR = temp.gpr();
- m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
+ m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
speculationCheck(
BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
@@ -797,27 +708,19 @@ void SpeculativeJIT::checkArray(Node* node)
noResult(m_currentNode);
return;
}
- case Array::DirectArguments:
- speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
- noResult(m_currentNode);
- return;
- case Array::ScopedArguments:
- speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
- noResult(m_currentNode);
- return;
+ case Array::Arguments:
+ expectedClassInfo = Arguments::info();
+ break;
default:
- speculateCellTypeWithoutTypeFiltering(
- node->child1(), baseReg,
- typeForTypedArrayType(node->arrayMode().typedArrayType()));
- noResult(m_currentNode);
- return;
+ expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType());
+ break;
}
RELEASE_ASSERT(expectedClassInfo);
GPRTemporary temp(this);
- GPRTemporary temp2(this);
- m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
speculationCheck(
BadType, JSValueSource::unboxedCell(baseReg), node,
m_jit.branchPtr(
@@ -847,19 +750,22 @@ void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
MacroAssembler::JumpList slowPath;
if (node->op() == ArrayifyToStructure) {
- slowPath.append(m_jit.branchWeakStructure(
+ slowPath.append(m_jit.branchWeakPtr(
JITCompiler::NotEqual,
- JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
+ JITCompiler::Address(baseReg, JSCell::structureOffset()),
node->structure()));
} else {
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
+
m_jit.load8(
- MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
+ MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
}
- addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
- slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
+ addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
+ slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
noResult(m_currentNode);
}
@@ -943,9 +849,12 @@ void SpeculativeJIT::compileIn(Node* node)
{
SpeculateCellOperand base(this, node->child2());
GPRReg baseGPR = base.gpr();
-
- if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
- if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
+
+ if (isConstant(node->child1().node())) {
+ JSString* string =
+ jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node()));
+ if (string && string->tryGetValueImpl()
+ && string->tryGetValueImpl()->isIdentifier()) {
StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
GPRTemporary result(this);
@@ -956,33 +865,36 @@ void SpeculativeJIT::compileIn(Node* node)
MacroAssembler::PatchableJump jump = m_jit.patchableJump();
MacroAssembler::Label done = m_jit.label();
- // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
- // we can cast it to const AtomicStringImpl* safely.
- auto slowPath = slowPathCall(
+ OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
jump.m_jump, this, operationInOptimize,
JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
- static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
+ string->tryGetValueImpl());
- stubInfo->codeOrigin = node->origin.semantic;
+ stubInfo->codeOrigin = node->codeOrigin;
stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
stubInfo->patch.usedRegisters = usedRegisters();
- stubInfo->patch.spillMode = NeedToSpill;
-
+ stubInfo->patch.registersFlushed = false;
+
m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
- addSlowPathGenerator(WTF::move(slowPath));
-
+ addSlowPathGenerator(slowPath.release());
+
base.use();
-
- blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
+
+#if USE(JSVALUE64)
+ jsValueResult(
+ resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+#else
+ booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
+#endif
return;
}
}
-
+
JSValueOperand key(this, node->child1());
JSValueRegs regs = key.jsValueRegs();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
base.use();
@@ -992,7 +904,11 @@ void SpeculativeJIT::compileIn(Node* node)
callOperation(
operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
baseGPR, regs);
- blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
+#if USE(JSVALUE64)
+ jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+#else
+ booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
+#endif
}
bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
@@ -1126,29 +1042,6 @@ GPRTemporary::GPRTemporary(
}
#endif // USE(JSVALUE32_64)
-JSValueRegsTemporary::JSValueRegsTemporary() { }
-
-JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
-#if USE(JSVALUE64)
- : m_gpr(jit)
-#else
- : m_payloadGPR(jit)
- , m_tagGPR(jit)
-#endif
-{
-}
-
-JSValueRegsTemporary::~JSValueRegsTemporary() { }
-
-JSValueRegs JSValueRegsTemporary::regs()
-{
-#if USE(JSVALUE64)
- return JSValueRegs(m_gpr.gpr());
-#else
- return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
-#endif
-}
-
void GPRTemporary::adopt(GPRTemporary& other)
{
ASSERT(!m_jit);
@@ -1204,8 +1097,8 @@ FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
SpeculateDoubleOperand op1(this, node->child1());
SpeculateDoubleOperand op2(this, node->child2());
@@ -1216,8 +1109,8 @@ void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, J
void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
@@ -1237,33 +1130,52 @@ void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
speculationCheck(
- BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
+ BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
}
if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
speculationCheck(
- BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
+ BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
}
} else {
+ GPRTemporary structure(this);
+ GPRReg structureGPR = structure.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
speculationCheck(
BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
- m_jit.branchIfNotObject(op1GPR));
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
}
speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+ m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
speculationCheck(
BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
- m_jit.branchIfNotObject(op2GPR));
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
}
speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
@@ -1273,8 +1185,8 @@ void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
@@ -1285,13 +1197,13 @@ void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode,
notTaken = tmp;
}
- if (node->child1()->isBooleanConstant()) {
- bool imm = node->child1()->asBoolean();
+ if (isBooleanConstant(node->child1().node())) {
+ bool imm = valueOfBooleanConstant(node->child1().node());
SpeculateBooleanOperand op2(this, node->child2());
branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
- } else if (node->child2()->isBooleanConstant()) {
+ } else if (isBooleanConstant(node->child2().node())) {
SpeculateBooleanOperand op1(this, node->child1());
- bool imm = node->child2()->asBoolean();
+ bool imm = valueOfBooleanConstant(node->child2().node());
branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
} else {
SpeculateBooleanOperand op1(this, node->child1());
@@ -1304,8 +1216,8 @@ void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode,
void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
@@ -1316,13 +1228,13 @@ void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JI
notTaken = tmp;
}
- if (node->child1()->isInt32Constant()) {
- int32_t imm = node->child1()->asInt32();
+ if (isInt32Constant(node->child1().node())) {
+ int32_t imm = valueOfInt32Constant(node->child1().node());
SpeculateInt32Operand op2(this, node->child2());
branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
- } else if (node->child2()->isInt32Constant()) {
+ } else if (isInt32Constant(node->child2().node())) {
SpeculateInt32Operand op1(this, node->child1());
- int32_t imm = node->child2()->asInt32();
+ int32_t imm = valueOfInt32Constant(node->child2().node());
branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
} else {
SpeculateInt32Operand op1(this, node->child1());
@@ -1348,10 +1260,10 @@ bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::Relationa
if (node->isBinaryUseKind(Int32Use))
compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
- else if (node->isBinaryUseKind(Int52RepUse))
+ else if (node->isBinaryUseKind(MachineIntUse))
compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
- else if (node->isBinaryUseKind(DoubleRepUse))
+ else if (node->isBinaryUseKind(NumberUse))
compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
else if (node->op() == CompareEq) {
if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
@@ -1362,9 +1274,9 @@ bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::Relationa
compilePeepHoleBooleanBranch(node, branchNode, condition);
else if (node->isBinaryUseKind(ObjectUse))
compilePeepHoleObjectEquality(node, branchNode);
- else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
+ else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
- else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
+ else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
else {
nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
@@ -1405,12 +1317,10 @@ void SpeculativeJIT::compileMovHint(Node* node)
m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
}
-void SpeculativeJIT::bail(AbortReason reason)
+void SpeculativeJIT::bail()
{
- if (verboseCompilationEnabled())
- dataLog("Bailing compilation.\n");
m_compileOkay = true;
- m_jit.abortWithReason(reason, m_lastGeneratedNode);
+ m_jit.breakpoint();
clearGenerationInfo();
}
@@ -1425,26 +1335,29 @@ void SpeculativeJIT::compileCurrentBlock()
m_jit.blockHeads()[m_block->index] = m_jit.label();
- if (!m_block->intersectionOfCFAHasVisited) {
+ if (!m_block->cfaHasVisited) {
// Don't generate code for basic blocks that are unreachable according to CFA.
// But to be sure that nobody has generated a jump to this block, drop in a
// breakpoint here.
- m_jit.abortWithReason(DFGUnreachableBasicBlock);
+ m_jit.breakpoint();
return;
}
m_stream->appendAndLog(VariableEvent::reset());
m_jit.jitAssertHasValidCallFrame();
- m_jit.jitAssertTagsInPlace();
- m_jit.jitAssertArgumentCountSane();
+ for (size_t i = 0; i < m_block->variablesAtHead.numberOfArguments(); ++i) {
+ m_stream->appendAndLog(
+ VariableEvent::setLocal(
+ virtualRegisterForArgument(i), virtualRegisterForArgument(i), DataFormatJS));
+ }
+
m_state.reset();
m_state.beginBasicBlock(m_block);
- for (size_t i = m_block->variablesAtHead.size(); i--;) {
- int operand = m_block->variablesAtHead.operandForIndex(i);
- Node* node = m_block->variablesAtHead[i];
+ for (size_t i = 0; i < m_block->variablesAtHead.numberOfLocals(); ++i) {
+ Node* node = m_block->variablesAtHead.local(i);
if (!node)
continue; // No need to record dead SetLocal's.
@@ -1452,12 +1365,10 @@ void SpeculativeJIT::compileCurrentBlock()
DataFormat format;
if (!node->refCount())
continue; // No need to record dead SetLocal's.
- format = dataFormatFor(variable->flushFormat());
+ else
+ format = dataFormatFor(variable->flushFormat());
m_stream->appendAndLog(
- VariableEvent::setLocal(
- VirtualRegister(operand),
- variable->machineLocal(),
- format));
+ VariableEvent::setLocal(virtualRegisterForLocal(i), variable->machineLocal(), format));
}
m_codeOriginForExitTarget = CodeOrigin();
@@ -1466,50 +1377,77 @@ void SpeculativeJIT::compileCurrentBlock()
for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
m_currentNode = m_block->at(m_indexInBlock);
- // We may have hit a contradiction that the CFA was aware of but that the JIT
+ // We may have his a contradiction that the CFA was aware of but that the JIT
// didn't cause directly.
if (!m_state.isValid()) {
- bail(DFGBailedAtTopOfBlock);
+ bail();
return;
}
-
- if (ASSERT_DISABLED)
- m_canExit = true; // Essentially disable the assertions.
- else
- m_canExit = mayExit(m_jit.graph(), m_currentNode);
- m_interpreter.startExecuting();
+ m_canExit = m_currentNode->canExit();
+ bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
m_jit.setForNode(m_currentNode);
- m_codeOriginForExitTarget = m_currentNode->origin.forExit;
- m_codeOriginForExitProfile = m_currentNode->origin.semantic;
- m_lastGeneratedNode = m_currentNode->op();
-
- ASSERT(m_currentNode->shouldGenerate());
-
- if (verboseCompilationEnabled()) {
- dataLogF(
- "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
- (int)m_currentNode->index(),
- m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
- dataLog("\n");
- }
-
- compile(m_currentNode);
-
- if (belongsInMinifiedGraph(m_currentNode->op()))
- m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
-
+ m_codeOriginForExitTarget = m_currentNode->codeOriginForExitTarget;
+ m_codeOriginForExitProfile = m_currentNode->codeOrigin;
+ if (!m_currentNode->shouldGenerate()) {
+ switch (m_currentNode->op()) {
+ case JSConstant:
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ break;
+
+ case WeakJSConstant:
+ m_jit.addWeakReference(m_currentNode->weakConstant());
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ break;
+
+ case SetLocal:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+
+ case MovHint:
+ compileMovHint(m_currentNode);
+ break;
+
+ case ZombieHint: {
+ recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
+ break;
+ }
+
+ default:
+ if (belongsInMinifiedGraph(m_currentNode->op()))
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ break;
+ }
+ } else {
+
+ if (verboseCompilationEnabled()) {
+ dataLogF(
+ "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
+ (int)m_currentNode->index(),
+ m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
+ dataLog("\n");
+ }
+
+ compile(m_currentNode);
+
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
- m_jit.clearRegisterAllocationOffsets();
+ m_jit.clearRegisterAllocationOffsets();
#endif
-
- if (!m_compileOkay) {
- bail(DFGBailedAtEndOfNode);
- return;
+
+ if (!m_compileOkay) {
+ bail();
+ return;
+ }
+
+ if (belongsInMinifiedGraph(m_currentNode->op())) {
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ noticeOSRBirth(m_currentNode);
+ }
}
// Make sure that the abstract state is rematerialized for the next node.
- m_interpreter.executeEffects(m_indexInBlock);
+ if (shouldExecuteEffects)
+ m_interpreter.executeEffects(m_indexInBlock);
}
// Perform the most basic verification that children have been used correctly.
@@ -1597,7 +1535,7 @@ void SpeculativeJIT::checkArgumentTypes()
bool SpeculativeJIT::compile()
{
checkArgumentTypes();
-
+
ASSERT(!m_currentNode);
for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
m_jit.setForBlockIndex(blockIndex);
@@ -1635,15 +1573,6 @@ void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
}
ASSERT(osrEntryIndex == m_osrEntryHeads.size());
-
- if (verboseCompilationEnabled()) {
- DumpContext dumpContext;
- dataLog("OSR Entries:\n");
- for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
- dataLog(" ", inContext(entryData, &dumpContext), "\n");
- if (!dumpContext.isEmpty())
- dumpContext.dump(WTF::dataFile());
- }
}
void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
@@ -1818,24 +1747,15 @@ void SpeculativeJIT::compileGetByValOnString(Node* node)
m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif
- JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
+ JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
if (globalObject->stringPrototypeChainIsSane()) {
- // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
- // loads return a trivial value". Something like SaneChainOutOfBounds. This should
- // speculate that we don't take negative out-of-bounds, or better yet, it should rely
- // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
- // indexed properties either.
- // https://bugs.webkit.org/show_bug.cgi?id=144668
- m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
- m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
-
#if USE(JSVALUE64)
- addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
- outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
+ addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
+ outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
#else
- addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
+ addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
- baseReg, propertyReg));
+ baseReg, propertyReg)));
#endif
} else {
#if USE(JSVALUE64)
@@ -1897,13 +1817,16 @@ GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
case DataFormatJSCell:
case DataFormatJS:
case DataFormatJSBoolean:
- case DataFormatJSDouble:
return GeneratedOperandJSValue;
case DataFormatJSInt32:
case DataFormatInt32:
return GeneratedOperandInteger;
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ return GeneratedOperandDouble;
+
default:
RELEASE_ASSERT_NOT_REACHED();
return GeneratedOperandTypeUnknown;
@@ -1913,8 +1836,16 @@ GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
void SpeculativeJIT::compileValueToInt32(Node* node)
{
switch (node->child1().useKind()) {
+ case Int32Use: {
+ SpeculateInt32Operand op1(this, node->child1());
+ GPRTemporary result(this, Reuse, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ int32Result(result.gpr(), node, op1.format());
+ return;
+ }
+
#if USE(JSVALUE64)
- case Int52RepUse: {
+ case MachineIntUse: {
SpeculateStrictInt52Operand op1(this, node->child1());
GPRTemporary result(this, Reuse, op1);
GPRReg op1GPR = op1.gpr();
@@ -1924,19 +1855,6 @@ void SpeculativeJIT::compileValueToInt32(Node* node)
return;
}
#endif // USE(JSVALUE64)
-
- case DoubleRepUse: {
- GPRTemporary result(this);
- SpeculateDoubleOperand op1(this, node->child1());
- FPRReg fpr = op1.fpr();
- GPRReg gpr = result.gpr();
- JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
-
- addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
-
- int32Result(gpr, node);
- return;
- }
case NumberUse:
case NotCellUse: {
@@ -1948,6 +1866,18 @@ void SpeculativeJIT::compileValueToInt32(Node* node)
int32Result(result.gpr(), node, op1.format());
return;
}
+ case GeneratedOperandDouble: {
+ GPRTemporary result(this);
+ SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
+ FPRReg fpr = op1.fpr();
+ GPRReg gpr = result.gpr();
+ JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
+
+ addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
+
+ int32Result(gpr, node);
+ return;
+ }
case GeneratedOperandJSValue: {
GPRTemporary result(this);
#if USE(JSVALUE64)
@@ -1963,14 +1893,16 @@ void SpeculativeJIT::compileValueToInt32(Node* node)
if (node->child1().useKind() == NumberUse) {
DFG_TYPE_CHECK(
- JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
+ JSValueRegs(gpr), node->child1(), SpecFullNumber,
m_jit.branchTest64(
MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
} else {
JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
DFG_TYPE_CHECK(
- JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
+ JSValueRegs(gpr), node->child1(), ~SpecCell,
+ m_jit.branchTest64(
+ JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
// It's not a cell: so true turns into 1 and all else turns into 0.
m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
@@ -2017,7 +1949,7 @@ void SpeculativeJIT::compileValueToInt32(Node* node)
if (node->child1().useKind() == NumberUse) {
DFG_TYPE_CHECK(
- op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
+ JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecFullNumber,
m_jit.branch32(
MacroAssembler::AboveOrEqual, tagGPR,
TrustedImm32(JSValue::LowestTag)));
@@ -2025,8 +1957,9 @@ void SpeculativeJIT::compileValueToInt32(Node* node)
JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
DFG_TYPE_CHECK(
- op1.jsValueRegs(), node->child1(), ~SpecCell,
- m_jit.branchIfCell(op1.jsValueRegs()));
+ JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
+ m_jit.branch32(
+ JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
// It's not a cell: so true turns into 1 and all else turns into 0.
JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
@@ -2065,6 +1998,17 @@ void SpeculativeJIT::compileValueToInt32(Node* node)
return;
}
+ case BooleanUse: {
+ SpeculateBooleanOperand op1(this, node->child1());
+ GPRTemporary result(this, Reuse, op1);
+
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
+
+ int32Result(result.gpr(), node);
+ return;
+ }
+
default:
ASSERT(!m_compileOkay);
return;
@@ -2125,220 +2069,70 @@ void SpeculativeJIT::compileDoubleAsInt32(Node* node)
int32Result(resultGPR, node);
}
-void SpeculativeJIT::compileDoubleRep(Node* node)
+void SpeculativeJIT::compileInt32ToDouble(Node* node)
{
- switch (node->child1().useKind()) {
- case RealNumberUse: {
- JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
+ ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
+
+ if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
+ SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
FPRTemporary result(this);
-
- JSValueRegs op1Regs = op1.jsValueRegs();
- FPRReg resultFPR = result.fpr();
-
-#if USE(JSVALUE64)
- GPRTemporary temp(this);
- GPRReg tempGPR = temp.gpr();
- m_jit.move(op1Regs.gpr(), tempGPR);
- m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
-#else
- FPRTemporary temp(this);
- FPRReg tempFPR = temp.fpr();
- unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
-#endif
-
- JITCompiler::Jump done = m_jit.branchDouble(
- JITCompiler::DoubleEqual, resultFPR, resultFPR);
-
- DFG_TYPE_CHECK(
- op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
- m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
-
- done.link(&m_jit);
-
- doubleResult(resultFPR, node);
+ m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
+ doubleResult(result.fpr(), node);
return;
}
- case NotCellUse:
- case NumberUse: {
- ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
-
- SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
- if (isInt32Speculation(possibleTypes)) {
- SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
- FPRTemporary result(this);
- m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
- doubleResult(result.fpr(), node);
- return;
- }
-
- JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
- FPRTemporary result(this);
-
+ JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
+ FPRTemporary result(this);
+
#if USE(JSVALUE64)
- GPRTemporary temp(this);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg tempGPR = temp.gpr();
- FPRReg resultFPR = result.fpr();
- JITCompiler::JumpList done;
-
- JITCompiler::Jump isInteger = m_jit.branch64(
- MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
-
- if (node->child1().useKind() == NotCellUse) {
- JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
-
- static const double zero = 0;
- m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
-
- JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
- done.append(isNull);
-
- DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
- m_jit.branchTest64(JITCompiler::NonZero, op1GPR, TrustedImm32(static_cast<int32_t>(~1))));
-
- JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
- static const double one = 1;
- m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
- done.append(isFalse);
-
- isUndefined.link(&m_jit);
- static const double NaN = PNaN;
- m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
- done.append(m_jit.jump());
+ GPRTemporary temp(this);
- isNumber.link(&m_jit);
- } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
- typeCheck(
- JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
- m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
- }
+ GPRReg op1GPR = op1.gpr();
+ GPRReg tempGPR = temp.gpr();
+ FPRReg resultFPR = result.fpr();
- m_jit.move(op1GPR, tempGPR);
- unboxDouble(tempGPR, resultFPR);
- done.append(m_jit.jump());
+ JITCompiler::Jump isInteger = m_jit.branch64(
+ MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
- isInteger.link(&m_jit);
- m_jit.convertInt32ToDouble(op1GPR, resultFPR);
- done.link(&m_jit);
-#else // USE(JSVALUE64) -> this is the 32_64 case
- FPRTemporary temp(this);
+ if (needsTypeCheck(node->child1(), SpecFullNumber)) {
+ typeCheck(
+ JSValueRegs(op1GPR), node->child1(), SpecFullNumber,
+ m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
+ }
- GPRReg op1TagGPR = op1.tagGPR();
- GPRReg op1PayloadGPR = op1.payloadGPR();
- FPRReg tempFPR = temp.fpr();
- FPRReg resultFPR = result.fpr();
- JITCompiler::JumpList done;
+ m_jit.move(op1GPR, tempGPR);
+ unboxDouble(tempGPR, resultFPR);
+ JITCompiler::Jump done = m_jit.jump();
- JITCompiler::Jump isInteger = m_jit.branch32(
- MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
-
- if (node->child1().useKind() == NotCellUse) {
- JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
- JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
-
- static const double zero = 0;
- m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
-
- JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
- done.append(isNull);
-
- DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
-
- JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
- static const double one = 1;
- m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
- done.append(isFalse);
-
- isUndefined.link(&m_jit);
- static const double NaN = PNaN;
- m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
- done.append(m_jit.jump());
-
- isNumber.link(&m_jit);
- } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
- typeCheck(
- JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
- m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
- }
-
- unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
- done.append(m_jit.jump());
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(op1GPR, resultFPR);
+ done.link(&m_jit);
+#else
+ FPRTemporary temp(this);
- isInteger.link(&m_jit);
- m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
- done.link(&m_jit);
-#endif // USE(JSVALUE64)
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ FPRReg tempFPR = temp.fpr();
+ FPRReg resultFPR = result.fpr();
- doubleResult(resultFPR, node);
- return;
- }
-
-#if USE(JSVALUE64)
- case Int52RepUse: {
- SpeculateStrictInt52Operand value(this, node->child1());
- FPRTemporary result(this);
-
- GPRReg valueGPR = value.gpr();
- FPRReg resultFPR = result.fpr();
-
- m_jit.convertInt64ToDouble(valueGPR, resultFPR);
-
- doubleResult(resultFPR, node);
- return;
- }
-#endif // USE(JSVALUE64)
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return;
- }
-}
-
-void SpeculativeJIT::compileValueRep(Node* node)
-{
- switch (node->child1().useKind()) {
- case DoubleRepUse: {
- SpeculateDoubleOperand value(this, node->child1());
- JSValueRegsTemporary result(this);
-
- FPRReg valueFPR = value.fpr();
- JSValueRegs resultRegs = result.regs();
-
- // It's very tempting to in-place filter the value to indicate that it's not impure NaN
- // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
- // subject to a prior SetLocal, filtering the value would imply that the corresponding
- // local was purified.
- if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
- m_jit.purifyNaN(valueFPR);
-
- boxDouble(valueFPR, resultRegs);
-
- jsValueResult(resultRegs, node);
- return;
- }
-
-#if USE(JSVALUE64)
- case Int52RepUse: {
- SpeculateStrictInt52Operand value(this, node->child1());
- GPRTemporary result(this);
-
- GPRReg valueGPR = value.gpr();
- GPRReg resultGPR = result.gpr();
-
- boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
-
- jsValueResult(resultGPR, node);
- return;
- }
-#endif // USE(JSVALUE64)
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return;
+ JITCompiler::Jump isInteger = m_jit.branch32(
+ MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ if (needsTypeCheck(node->child1(), SpecFullNumber)) {
+ typeCheck(
+ JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber,
+ m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
}
+
+ unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
+ done.link(&m_jit);
+#endif
+
+ doubleResult(resultFPR, node);
}
static double clampDoubleToByte(double d)
@@ -2369,12 +2163,12 @@ static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg sou
static const double zero = 0;
static const double byteMax = 255;
static const double half = 0.5;
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
+ jit.loadDouble(&zero, scratch);
MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
+ jit.loadDouble(&byteMax, scratch);
MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
+ jit.loadDouble(&half, scratch);
// FIXME: This should probably just use a floating point round!
// https://bugs.webkit.org/show_bug.cgi?id=72054
jit.addDouble(source, scratch);
@@ -2397,12 +2191,10 @@ JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRRe
{
if (node->op() == PutByValAlias)
return JITCompiler::Jump();
- JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
- m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
- if (view) {
+ if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
uint32_t length = view->length();
Node* indexNode = m_jit.graph().child(node, 1).node();
- if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
+ if (m_jit.graph().isInt32Constant(indexNode) && static_cast<uint32_t>(m_jit.graph().valueOfInt32Constant(indexNode)) < length)
return JITCompiler::Jump();
return m_jit.branch32(
MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
@@ -2441,13 +2233,13 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType t
switch (elementSize(type)) {
case 1:
if (isSigned(type))
- m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
+ m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
else
m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
break;
case 2:
if (isSigned(type))
- m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
+ m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
else
m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
break;
@@ -2498,7 +2290,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert
GPRReg valueGPR = InvalidGPRReg;
if (valueUse->isConstant()) {
- JSValue jsValue = valueUse->asJSValue();
+ JSValue jsValue = valueOfJSConstant(valueUse.node());
if (!jsValue.isNumber()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
noResult(node);
@@ -2531,7 +2323,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert
}
#if USE(JSVALUE64)
- case Int52RepUse: {
+ case MachineIntUse: {
SpeculateStrictInt52Operand valueOp(this, valueUse);
GPRTemporary scratch(this);
GPRReg scratchReg = scratch.gpr();
@@ -2555,7 +2347,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert
}
#endif // USE(JSVALUE64)
- case DoubleRepUse: {
+ case NumberUse: {
if (isClamped(type)) {
ASSERT(elementSize(type) == 1);
SpeculateDoubleOperand valueOp(this, valueUse);
@@ -2598,10 +2390,6 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert
ASSERT(valueGPR != base);
ASSERT(valueGPR != storageReg);
MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
- if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
- speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
- outOfBounds = MacroAssembler::Jump();
- }
switch (elementSize(type)) {
case 1:
@@ -2651,6 +2439,11 @@ void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType
RELEASE_ASSERT_NOT_REACHED();
}
+ MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
+ static const double NaN = QNaN;
+ m_jit.loadDouble(&NaN, resultReg);
+ notNaN.link(&m_jit);
+
doubleResult(resultReg, node);
}
@@ -2672,10 +2465,6 @@ void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg prope
ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
- if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
- speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
- outOfBounds = MacroAssembler::Jump();
- }
switch (elementSize(type)) {
case 4: {
@@ -2695,23 +2484,26 @@ void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg prope
noResult(node);
}
-void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
+void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
{
// Check that prototype is an object.
- speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
+ m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
+ speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
// Initialize scratchReg with the value being checked.
m_jit.move(valueReg, scratchReg);
// Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
MacroAssembler::Label loop(&m_jit);
- m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
- m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
- MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
#if USE(JSVALUE64)
- m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
+ m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
+ MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
#else
- m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
+ m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
+ MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
// No match - result is false.
@@ -2742,25 +2534,34 @@ void SpeculativeJIT::compileInstanceOf(Node* node)
JSValueOperand value(this, node->child1());
SpeculateCellOperand prototype(this, node->child2());
GPRTemporary scratch(this);
- GPRTemporary scratch2(this);
GPRReg prototypeReg = prototype.gpr();
GPRReg scratchReg = scratch.gpr();
- GPRReg scratch2Reg = scratch2.gpr();
- MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
- GPRReg valueReg = value.jsValueRegs().payloadGPR();
- moveFalseTo(scratchReg);
+#if USE(JSVALUE64)
+ GPRReg valueReg = value.gpr();
+ MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
+#else
+ GPRReg valueTagReg = value.tagGPR();
+ GPRReg valueReg = value.payloadGPR();
+ MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
+ m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
+#endif
MacroAssembler::Jump done = m_jit.jump();
isCell.link(&m_jit);
- compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
+ compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
done.link(&m_jit);
- blessedBooleanResult(scratchReg, node);
+#if USE(JSVALUE64)
+ jsValueResult(scratchReg, node, DataFormatJSBoolean);
+#else
+ booleanResult(scratchReg, node);
+#endif
return;
}
@@ -2768,16 +2569,18 @@ void SpeculativeJIT::compileInstanceOf(Node* node)
SpeculateCellOperand prototype(this, node->child2());
GPRTemporary scratch(this);
- GPRTemporary scratch2(this);
GPRReg valueReg = value.gpr();
GPRReg prototypeReg = prototype.gpr();
GPRReg scratchReg = scratch.gpr();
- GPRReg scratch2Reg = scratch2.gpr();
- compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
+ compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
- blessedBooleanResult(scratchReg, node);
+#if USE(JSVALUE64)
+ jsValueResult(scratchReg, node, DataFormatJSBoolean);
+#else
+ booleanResult(scratchReg, node);
+#endif
}
void SpeculativeJIT::compileAdd(Node* node)
@@ -2786,8 +2589,8 @@ void SpeculativeJIT::compileAdd(Node* node)
case Int32Use: {
ASSERT(!shouldCheckNegativeZero(node->arithMode()));
- if (node->child1()->isInt32Constant()) {
- int32_t imm1 = node->child1()->asInt32();
+ if (isNumberConstant(node->child1().node())) {
+ int32_t imm1 = valueOfInt32Constant(node->child1().node());
SpeculateInt32Operand op2(this, node->child2());
GPRTemporary result(this);
@@ -2801,9 +2604,9 @@ void SpeculativeJIT::compileAdd(Node* node)
return;
}
- if (node->child2()->isInt32Constant()) {
+ if (isNumberConstant(node->child2().node())) {
SpeculateInt32Operand op1(this, node->child1());
- int32_t imm2 = node->child2()->asInt32();
+ int32_t imm2 = valueOfInt32Constant(node->child2().node());
GPRTemporary result(this);
if (!shouldCheckOverflow(node->arithMode())) {
@@ -2847,7 +2650,7 @@ void SpeculativeJIT::compileAdd(Node* node)
}
#if USE(JSVALUE64)
- case Int52RepUse: {
+ case MachineIntUse: {
ASSERT(shouldCheckOverflow(node->arithMode()));
ASSERT(!shouldCheckNegativeZero(node->arithMode()));
@@ -2876,7 +2679,7 @@ void SpeculativeJIT::compileAdd(Node* node)
}
#endif // USE(JSVALUE64)
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
SpeculateDoubleOperand op2(this, node->child2());
FPRTemporary result(this, op1, op2);
@@ -2924,7 +2727,7 @@ void SpeculativeJIT::compileMakeRope(Node* node)
GPRReg scratchGPR = scratch.gpr();
JITCompiler::JumpList slowPath;
- MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
+ MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
@@ -2938,7 +2741,7 @@ void SpeculativeJIT::compileMakeRope(Node* node)
if (!ASSERT_DISABLED) {
JITCompiler::Jump ok = m_jit.branch32(
JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
- m_jit.abortWithReason(DFGNegativeStringLength);
+ m_jit.breakpoint();
ok.link(&m_jit);
}
for (unsigned i = 1; i < numOpGPRs; ++i) {
@@ -2954,7 +2757,7 @@ void SpeculativeJIT::compileMakeRope(Node* node)
if (!ASSERT_DISABLED) {
JITCompiler::Jump ok = m_jit.branch32(
JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
- m_jit.abortWithReason(DFGNegativeStringLength);
+ m_jit.breakpoint();
ok.link(&m_jit);
}
m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
@@ -2976,26 +2779,15 @@ void SpeculativeJIT::compileMakeRope(Node* node)
cellResult(resultGPR, node);
}
-void SpeculativeJIT::compileArithClz32(Node* node)
-{
- ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
- SpeculateInt32Operand value(this, node->child1());
- GPRTemporary result(this, Reuse, value);
- GPRReg valueReg = value.gpr();
- GPRReg resultReg = result.gpr();
- m_jit.countLeadingZeros32(valueReg, resultReg);
- int32Result(resultReg, node);
-}
-
void SpeculativeJIT::compileArithSub(Node* node)
{
switch (node->binaryUseKind()) {
case Int32Use: {
ASSERT(!shouldCheckNegativeZero(node->arithMode()));
- if (node->child2()->isInt32Constant()) {
+ if (isNumberConstant(node->child2().node())) {
SpeculateInt32Operand op1(this, node->child1());
- int32_t imm2 = node->child2()->asInt32();
+ int32_t imm2 = valueOfInt32Constant(node->child2().node());
GPRTemporary result(this);
if (!shouldCheckOverflow(node->arithMode())) {
@@ -3010,8 +2802,8 @@ void SpeculativeJIT::compileArithSub(Node* node)
return;
}
- if (node->child1()->isInt32Constant()) {
- int32_t imm1 = node->child1()->asInt32();
+ if (isNumberConstant(node->child1().node())) {
+ int32_t imm1 = valueOfInt32Constant(node->child1().node());
SpeculateInt32Operand op2(this, node->child2());
GPRTemporary result(this);
@@ -3040,7 +2832,7 @@ void SpeculativeJIT::compileArithSub(Node* node)
}
#if USE(JSVALUE64)
- case Int52RepUse: {
+ case MachineIntUse: {
ASSERT(shouldCheckOverflow(node->arithMode()));
ASSERT(!shouldCheckNegativeZero(node->arithMode()));
@@ -3069,7 +2861,7 @@ void SpeculativeJIT::compileArithSub(Node* node)
}
#endif // USE(JSVALUE64)
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
SpeculateDoubleOperand op2(this, node->child2());
FPRTemporary result(this, op1);
@@ -3114,7 +2906,7 @@ void SpeculativeJIT::compileArithNegate(Node* node)
}
#if USE(JSVALUE64)
- case Int52RepUse: {
+ case MachineIntUse: {
ASSERT(shouldCheckOverflow(node->arithMode()));
if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
@@ -3151,7 +2943,7 @@ void SpeculativeJIT::compileArithNegate(Node* node)
}
#endif // USE(JSVALUE64)
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
FPRTemporary result(this);
@@ -3202,7 +2994,7 @@ void SpeculativeJIT::compileArithMul(Node* node)
}
#if USE(JSVALUE64)
- case Int52RepUse: {
+ case MachineIntUse: {
ASSERT(shouldCheckOverflow(node->arithMode()));
// This is super clever. We want to do an int52 multiplication and check the
@@ -3259,7 +3051,7 @@ void SpeculativeJIT::compileArithMul(Node* node)
}
#endif // USE(JSVALUE64)
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
SpeculateDoubleOperand op2(this, node->child2());
FPRTemporary result(this, op1, op2);
@@ -3365,7 +3157,33 @@ void SpeculativeJIT::compileArithDiv(Node* node)
done.link(&m_jit);
int32Result(eax.gpr(), node);
-#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
+#elif CPU(APPLE_ARMV7S)
+ SpeculateInt32Operand op1(this, node->child1());
+ SpeculateInt32Operand op2(this, node->child2());
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+ GPRTemporary quotient(this);
+ GPRTemporary multiplyAnswer(this);
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (shouldCheckNegativeZero(node->arithMode())) {
+ MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+ numeratorNonZero.link(&m_jit);
+ }
+
+ m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
+
+ // Check that there was no remainder. If there had been, then we'd be obligated to
+ // produce a double result instead.
+ if (shouldCheckOverflow(node->arithMode())) {
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
+ }
+
+ int32Result(quotient.gpr(), node);
+#elif CPU(ARM64)
SpeculateInt32Operand op1(this, node->child1());
SpeculateInt32Operand op2(this, node->child2());
GPRReg op1GPR = op1.gpr();
@@ -3397,7 +3215,7 @@ void SpeculativeJIT::compileArithDiv(Node* node)
break;
}
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
SpeculateDoubleOperand op2(this, node->child2());
FPRTemporary result(this, op1);
@@ -3424,8 +3242,8 @@ void SpeculativeJIT::compileArithMod(Node* node)
// (in case of |dividend| < |divisor|), so we speculate it as strict int32.
SpeculateStrictInt32Operand op1(this, node->child1());
- if (node->child2()->isInt32Constant()) {
- int32_t divisor = node->child2()->asInt32();
+ if (isInt32Constant(node->child2().node())) {
+ int32_t divisor = valueOfInt32Constant(node->child2().node());
if (divisor > 1 && hasOneBitSet(divisor)) {
unsigned logarithm = WTF::fastLog2(divisor);
GPRReg dividendGPR = op1.gpr();
@@ -3486,8 +3304,8 @@ void SpeculativeJIT::compileArithMod(Node* node)
}
#if CPU(X86) || CPU(X86_64)
- if (node->child2()->isInt32Constant()) {
- int32_t divisor = node->child2()->asInt32();
+ if (isInt32Constant(node->child2().node())) {
+ int32_t divisor = valueOfInt32Constant(node->child2().node());
if (divisor && divisor != -1) {
GPRReg op1Gpr = op1.gpr();
@@ -3618,7 +3436,7 @@ void SpeculativeJIT::compileArithMod(Node* node)
done.link(&m_jit);
int32Result(edx.gpr(), node);
-#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
+#elif CPU(APPLE_ARMV7S)
GPRTemporary temp(this);
GPRTemporary quotientThenRemainder(this);
GPRTemporary multiplyAnswer(this);
@@ -3627,27 +3445,38 @@ void SpeculativeJIT::compileArithMod(Node* node)
GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
- JITCompiler::JumpList done;
-
- if (shouldCheckOverflow(node->arithMode()))
- speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
- else {
- JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
- m_jit.move(divisorGPR, quotientThenRemainderGPR);
- done.append(m_jit.jump());
- denominatorNotZero.link(&m_jit);
+ m_jit.assembler().sdiv(quotientThenRemainderGPR, dividendGPR, divisorGPR);
+ // FIXME: It seems like there are cases where we don't need this? What if we have
+ // arithMode() == Arith::Unchecked?
+ // https://bugs.webkit.org/show_bug.cgi?id=126444
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
+ m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (shouldCheckNegativeZero(node->arithMode())) {
+ // Check that we're not about to create negative zero.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
+ numeratorPositive.link(&m_jit);
}
+ int32Result(quotientThenRemainderGPR, node);
+#elif CPU(ARM64)
+ GPRTemporary temp(this);
+ GPRTemporary quotientThenRemainder(this);
+ GPRTemporary multiplyAnswer(this);
+ GPRReg dividendGPR = op1.gpr();
+ GPRReg divisorGPR = op2.gpr();
+ GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
+ GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
+
m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
// FIXME: It seems like there are cases where we don't need this? What if we have
// arithMode() == Arith::Unchecked?
// https://bugs.webkit.org/show_bug.cgi?id=126444
speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
-#if HAVE(ARM_IDIV_INSTRUCTIONS)
- m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
-#else
m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
-#endif
// If the user cares about negative zero, then speculate that we're not about
// to produce negative zero.
@@ -3658,8 +3487,6 @@ void SpeculativeJIT::compileArithMod(Node* node)
numeratorPositive.link(&m_jit);
}
- done.link(&m_jit);
-
int32Result(quotientThenRemainderGPR, node);
#else // not architecture that can do integer division
RELEASE_ASSERT_NOT_REACHED();
@@ -3667,7 +3494,7 @@ void SpeculativeJIT::compileArithMod(Node* node)
return;
}
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
SpeculateDoubleOperand op2(this, node->child2());
@@ -3690,160 +3517,6 @@ void SpeculativeJIT::compileArithMod(Node* node)
}
}
-void SpeculativeJIT::compileArithRound(Node* node)
-{
- ASSERT(node->child1().useKind() == DoubleRepUse);
-
- SpeculateDoubleOperand value(this, node->child1());
- FPRReg valueFPR = value.fpr();
-
- if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
- FPRTemporary oneHalf(this);
- GPRTemporary roundedResultAsInt32(this);
- FPRReg oneHalfFPR = oneHalf.fpr();
- GPRReg resultGPR = roundedResultAsInt32.gpr();
-
- static const double halfConstant = 0.5;
- m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
- m_jit.addDouble(valueFPR, oneHalfFPR);
-
- JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
- speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
- int32Result(resultGPR, node);
- return;
- }
-
- flushRegisters();
- FPRResult roundedResultAsDouble(this);
- FPRReg resultFPR = roundedResultAsDouble.fpr();
- callOperation(jsRound, resultFPR, valueFPR);
- if (producesInteger(node->arithRoundingMode())) {
- GPRTemporary roundedResultAsInt32(this);
- FPRTemporary scratch(this);
- FPRReg scratchFPR = scratch.fpr();
- GPRReg resultGPR = roundedResultAsInt32.gpr();
- JITCompiler::JumpList failureCases;
- m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
- speculationCheck(Overflow, JSValueRegs(), node, failureCases);
-
- int32Result(resultGPR, node);
- } else
- doubleResult(resultFPR, node);
-}
-
-void SpeculativeJIT::compileArithSqrt(Node* node)
-{
- SpeculateDoubleOperand op1(this, node->child1());
- FPRReg op1FPR = op1.fpr();
-
- if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
- flushRegisters();
- FPRResult result(this);
- callOperation(sqrt, result.fpr(), op1FPR);
- doubleResult(result.fpr(), node);
- } else {
- FPRTemporary result(this, op1);
- m_jit.sqrtDouble(op1.fpr(), result.fpr());
- doubleResult(result.fpr(), node);
- }
-}
-
-// For small positive integers , it is worth doing a tiny inline loop to exponentiate the base.
-// Every register is clobbered by this helper.
-static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
-{
- MacroAssembler::JumpList skipFastPath;
- skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
- skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
-
- static const double oneConstant = 1.0;
- assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
-
- MacroAssembler::Label startLoop(assembler.label());
- MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
- assembler.mulDouble(xOperand, result);
- exponentIsEven.link(&assembler);
- assembler.mulDouble(xOperand, xOperand);
- assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
- assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
-
- MacroAssembler::Jump skipSlowPath = assembler.jump();
- skipFastPath.link(&assembler);
-
- return skipSlowPath;
-}
-
-void SpeculativeJIT::compileArithPow(Node* node)
-{
- if (node->child2().useKind() == Int32Use) {
- SpeculateDoubleOperand xOperand(this, node->child1());
- SpeculateInt32Operand yOperand(this, node->child2());
- FPRReg xOperandfpr = xOperand.fpr();
- GPRReg yOperandGpr = yOperand.gpr();
- FPRTemporary yOperandfpr(this);
-
- flushRegisters();
-
- FPRResult result(this);
- FPRReg resultFpr = result.fpr();
-
- FPRTemporary xOperandCopy(this);
- FPRReg xOperandCopyFpr = xOperandCopy.fpr();
- m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
-
- GPRTemporary counter(this);
- GPRReg counterGpr = counter.gpr();
- m_jit.move(yOperandGpr, counterGpr);
-
- MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
- m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr());
- callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr());
-
- skipFallback.link(&m_jit);
- doubleResult(resultFpr, node);
- return;
- }
-
- SpeculateDoubleOperand xOperand(this, node->child1());
- SpeculateDoubleOperand yOperand(this, node->child2());
- FPRReg xOperandfpr = xOperand.fpr();
- FPRReg yOperandfpr = yOperand.fpr();
-
- flushRegisters();
-
- FPRResult result(this);
- FPRReg resultFpr = result.fpr();
-
- FPRTemporary xOperandCopy(this);
- FPRReg xOperandCopyFpr = xOperandCopy.fpr();
-
- FPRTemporary scratch(this);
- FPRReg scratchFpr = scratch.fpr();
-
- GPRTemporary yOperandInteger(this);
- GPRReg yOperandIntegerGpr = yOperandInteger.gpr();
- MacroAssembler::JumpList failedExponentConversionToInteger;
- m_jit.branchConvertDoubleToInt32(yOperandfpr, yOperandIntegerGpr, failedExponentConversionToInteger, scratchFpr, false);
-
- m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
- MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, yOperandInteger.gpr(), resultFpr);
- failedExponentConversionToInteger.link(&m_jit);
-
- callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr);
- skipFallback.link(&m_jit);
- doubleResult(resultFpr, node);
-}
-
-void SpeculativeJIT::compileArithLog(Node* node)
-{
- SpeculateDoubleOperand op1(this, node->child1());
- FPRReg op1FPR = op1.fpr();
- flushRegisters();
- FPRResult result(this);
- callOperation(log, result.fpr(), op1FPR);
- doubleResult(result.fpr(), node);
-}
-
// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
@@ -3856,13 +3529,13 @@ bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition con
}
#if USE(JSVALUE64)
- if (node->isBinaryUseKind(Int52RepUse)) {
+ if (node->isBinaryUseKind(MachineIntUse)) {
compileInt52Compare(node, condition);
return false;
}
#endif // USE(JSVALUE64)
- if (node->isBinaryUseKind(DoubleRepUse)) {
+ if (node->isBinaryUseKind(NumberUse)) {
compileDoubleCompare(node, doubleCondition);
return false;
}
@@ -3888,12 +3561,12 @@ bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition con
return false;
}
- if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
+ if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse) {
compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
return false;
}
- if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
+ if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse) {
compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
return false;
}
@@ -3903,9 +3576,86 @@ bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition con
return false;
}
+bool SpeculativeJIT::compileStrictEqForConstant(Node* node, Edge value, JSValue constant)
+{
+ JSValueOperand op1(this, value);
+
+ // FIXME: This code is wrong for the case that the constant is null or undefined,
+ // and the value is an object that MasqueradesAsUndefined.
+ // https://bugs.webkit.org/show_bug.cgi?id=109487
+
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_block->at(branchIndexInBlock);
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
+ MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == nextBlock()) {
+ condition = MacroAssembler::NotEqual;
+ BasicBlock* tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+#if USE(JSVALUE64)
+ branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken);
+#else
+ GPRReg payloadGPR = op1.payloadGPR();
+ GPRReg tagGPR = op1.tagGPR();
+ if (condition == MacroAssembler::Equal) {
+ // Drop down if not equal, go elsewhere if equal.
+ MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
+ branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
+ notEqual.link(&m_jit);
+ } else {
+ // Drop down if equal, go elsewhere if not equal.
+ branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken);
+ branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
+ }
+#endif
+
+ jump(notTaken);
+
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+
+ GPRTemporary result(this);
+
+#if USE(JSVALUE64)
+ GPRReg op1GPR = op1.gpr();
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR);
+ MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant)));
+ m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
+ notEqual.link(&m_jit);
+ jsValueResult(resultGPR, node, DataFormatJSBoolean);
+#else
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(TrustedImm32(0), resultGPR);
+ MacroAssembler::JumpList notEqual;
+ notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag())));
+ notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload())));
+ m_jit.move(TrustedImm32(1), resultGPR);
+ notEqual.link(&m_jit);
+ booleanResult(resultGPR, node);
+#endif
+
+ return false;
+}
+
bool SpeculativeJIT::compileStrictEq(Node* node)
{
- if (node->isBinaryUseKind(BooleanUse)) {
+ switch (node->binaryUseKind()) {
+ case BooleanUse: {
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
Node* branchNode = m_block->at(branchIndexInBlock);
@@ -3920,7 +3670,7 @@ bool SpeculativeJIT::compileStrictEq(Node* node)
return false;
}
- if (node->isBinaryUseKind(Int32Use)) {
+ case Int32Use: {
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
Node* branchNode = m_block->at(branchIndexInBlock);
@@ -3936,7 +3686,7 @@ bool SpeculativeJIT::compileStrictEq(Node* node)
}
#if USE(JSVALUE64)
- if (node->isBinaryUseKind(Int52RepUse)) {
+ case MachineIntUse: {
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
Node* branchNode = m_block->at(branchIndexInBlock);
@@ -3951,8 +3701,8 @@ bool SpeculativeJIT::compileStrictEq(Node* node)
return false;
}
#endif // USE(JSVALUE64)
-
- if (node->isBinaryUseKind(DoubleRepUse)) {
+
+ case NumberUse: {
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
Node* branchNode = m_block->at(branchIndexInBlock);
@@ -3966,48 +3716,18 @@ bool SpeculativeJIT::compileStrictEq(Node* node)
compileDoubleCompare(node, MacroAssembler::DoubleEqual);
return false;
}
-
- if (node->isBinaryUseKind(StringUse)) {
+
+ case StringUse: {
compileStringEquality(node);
return false;
}
-
- if (node->isBinaryUseKind(StringIdentUse)) {
+
+ case StringIdentUse: {
compileStringIdentEquality(node);
return false;
}
-
- if (node->isBinaryUseKind(ObjectUse, UntypedUse)) {
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- Node* branchNode = m_block->at(branchIndexInBlock);
- compilePeepHoleObjectStrictEquality(node->child1(), node->child2(), branchNode);
- use(node->child1());
- use(node->child2());
- m_indexInBlock = branchIndexInBlock;
- m_currentNode = branchNode;
- return true;
- }
- compileObjectStrictEquality(node->child1(), node->child2());
- return false;
- }
-
- if (node->isBinaryUseKind(UntypedUse, ObjectUse)) {
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- Node* branchNode = m_block->at(branchIndexInBlock);
- compilePeepHoleObjectStrictEquality(node->child2(), node->child1(), branchNode);
- use(node->child1());
- use(node->child2());
- m_indexInBlock = branchIndexInBlock;
- m_currentNode = branchNode;
- return true;
- }
- compileObjectStrictEquality(node->child2(), node->child1());
- return false;
- }
-
- if (node->isBinaryUseKind(ObjectUse)) {
+
+ case ObjectUse: {
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
Node* branchNode = m_block->at(branchIndexInBlock);
@@ -4021,35 +3741,15 @@ bool SpeculativeJIT::compileStrictEq(Node* node)
compileObjectEquality(node);
return false;
}
-
- if (node->isBinaryUseKind(MiscUse, UntypedUse)
- || node->isBinaryUseKind(UntypedUse, MiscUse)) {
- compileMiscStrictEq(node);
- return false;
- }
-
- if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
- compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
- return false;
- }
-
- if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
- compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
- return false;
- }
-
- if (node->isBinaryUseKind(StringUse, UntypedUse)) {
- compileStringToUntypedEquality(node, node->child1(), node->child2());
- return false;
+
+ case UntypedUse: {
+ return nonSpeculativeStrictEq(node);
}
-
- if (node->isBinaryUseKind(UntypedUse, StringUse)) {
- compileStringToUntypedEquality(node, node->child2(), node->child1());
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
return false;
}
-
- RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
- return nonSpeculativeStrictEq(node);
}
void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
@@ -4060,20 +3760,44 @@ void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::Relationa
m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
- unblessedBooleanResult(result.gpr(), node);
+ // If we add a DataFormatBool, we should use it here.
+#if USE(JSVALUE32_64)
+ booleanResult(result.gpr(), node);
+#else
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
+#endif
}
-void SpeculativeJIT::compileStringEquality(
- Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
- GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
- JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
+void SpeculativeJIT::compileStringEquality(Node* node)
{
+ SpeculateCellOperand left(this, node->child1());
+ SpeculateCellOperand right(this, node->child2());
+ GPRTemporary length(this);
+ GPRTemporary leftTemp(this);
+ GPRTemporary rightTemp(this);
+ GPRTemporary leftTemp2(this, Reuse, left);
+ GPRTemporary rightTemp2(this, Reuse, right);
+
+ GPRReg leftGPR = left.gpr();
+ GPRReg rightGPR = right.gpr();
+ GPRReg lengthGPR = length.gpr();
+ GPRReg leftTempGPR = leftTemp.gpr();
+ GPRReg rightTempGPR = rightTemp.gpr();
+ GPRReg leftTemp2GPR = leftTemp2.gpr();
+ GPRReg rightTemp2GPR = rightTemp2.gpr();
+
JITCompiler::JumpList trueCase;
JITCompiler::JumpList falseCase;
JITCompiler::JumpList slowCase;
- trueCase.append(fastTrue);
- falseCase.append(fastFalse);
+ speculateString(node->child1(), leftGPR);
+
+ // It's safe to branch around the type check below, since proving that the values are
+ // equal does indeed prove that the right value is a string.
+ trueCase.append(m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR));
+
+ speculateString(node->child2(), rightGPR);
m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);
@@ -4115,87 +3839,31 @@ void SpeculativeJIT::compileStringEquality(
m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
trueCase.link(&m_jit);
- moveTrueTo(leftTempGPR);
+#if USE(JSVALUE64)
+ m_jit.move(TrustedImm64(ValueTrue), leftTempGPR);
+#else
+ m_jit.move(TrustedImm32(true), leftTempGPR);
+#endif
JITCompiler::Jump done = m_jit.jump();
falseCase.link(&m_jit);
- moveFalseTo(leftTempGPR);
+#if USE(JSVALUE64)
+ m_jit.move(TrustedImm64(ValueFalse), leftTempGPR);
+#else
+ m_jit.move(TrustedImm32(false), leftTempGPR);
+#endif
done.link(&m_jit);
addSlowPathGenerator(
slowPathCall(
slowCase, this, operationCompareStringEq, leftTempGPR, leftGPR, rightGPR));
- blessedBooleanResult(leftTempGPR, node);
-}
-
-void SpeculativeJIT::compileStringEquality(Node* node)
-{
- SpeculateCellOperand left(this, node->child1());
- SpeculateCellOperand right(this, node->child2());
- GPRTemporary length(this);
- GPRTemporary leftTemp(this);
- GPRTemporary rightTemp(this);
- GPRTemporary leftTemp2(this, Reuse, left);
- GPRTemporary rightTemp2(this, Reuse, right);
-
- GPRReg leftGPR = left.gpr();
- GPRReg rightGPR = right.gpr();
- GPRReg lengthGPR = length.gpr();
- GPRReg leftTempGPR = leftTemp.gpr();
- GPRReg rightTempGPR = rightTemp.gpr();
- GPRReg leftTemp2GPR = leftTemp2.gpr();
- GPRReg rightTemp2GPR = rightTemp2.gpr();
-
- speculateString(node->child1(), leftGPR);
-
- // It's safe to branch around the type check below, since proving that the values are
- // equal does indeed prove that the right value is a string.
- JITCompiler::Jump fastTrue = m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR);
-
- speculateString(node->child2(), rightGPR);
-
- compileStringEquality(
- node, leftGPR, rightGPR, lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
- rightTemp2GPR, fastTrue, JITCompiler::Jump());
-}
-
-void SpeculativeJIT::compileStringToUntypedEquality(Node* node, Edge stringEdge, Edge untypedEdge)
-{
- SpeculateCellOperand left(this, stringEdge);
- JSValueOperand right(this, untypedEdge, ManualOperandSpeculation);
- GPRTemporary length(this);
- GPRTemporary leftTemp(this);
- GPRTemporary rightTemp(this);
- GPRTemporary leftTemp2(this, Reuse, left);
- GPRTemporary rightTemp2(this);
-
- GPRReg leftGPR = left.gpr();
- JSValueRegs rightRegs = right.jsValueRegs();
- GPRReg lengthGPR = length.gpr();
- GPRReg leftTempGPR = leftTemp.gpr();
- GPRReg rightTempGPR = rightTemp.gpr();
- GPRReg leftTemp2GPR = leftTemp2.gpr();
- GPRReg rightTemp2GPR = rightTemp2.gpr();
-
- speculateString(stringEdge, leftGPR);
-
- JITCompiler::JumpList fastTrue;
- JITCompiler::JumpList fastFalse;
-
- fastFalse.append(m_jit.branchIfNotCell(rightRegs));
-
- // It's safe to branch around the type check below, since proving that the values are
- // equal does indeed prove that the right value is a string.
- fastTrue.append(m_jit.branchPtr(
- MacroAssembler::Equal, leftGPR, rightRegs.payloadGPR()));
-
- fastFalse.append(m_jit.branchIfNotString(rightRegs.payloadGPR()));
-
- compileStringEquality(
- node, leftGPR, rightRegs.payloadGPR(), lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
- rightTemp2GPR, fastTrue, fastFalse);
+#if USE(JSVALUE64)
+ jsValueResult(leftTempGPR, node, DataFormatJSBoolean);
+#else
+ booleanResult(leftTempGPR, node);
+#endif
}
void SpeculativeJIT::compileStringIdentEquality(Node* node)
@@ -4210,6 +3878,9 @@ void SpeculativeJIT::compileStringIdentEquality(Node* node)
GPRReg leftTempGPR = leftTemp.gpr();
GPRReg rightTempGPR = rightTemp.gpr();
+ JITCompiler::JumpList trueCase;
+ JITCompiler::JumpList falseCase;
+
speculateString(node->child1(), leftGPR);
speculateString(node->child2(), rightGPR);
@@ -4218,35 +3889,12 @@ void SpeculativeJIT::compileStringIdentEquality(Node* node)
m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, leftTempGPR);
- unblessedBooleanResult(leftTempGPR, node);
-}
-
-void SpeculativeJIT::compileStringIdentToNotStringVarEquality(
- Node* node, Edge stringEdge, Edge notStringVarEdge)
-{
- SpeculateCellOperand left(this, stringEdge);
- JSValueOperand right(this, notStringVarEdge, ManualOperandSpeculation);
- GPRTemporary leftTemp(this);
- GPRTemporary rightTemp(this);
- GPRReg leftTempGPR = leftTemp.gpr();
- GPRReg rightTempGPR = rightTemp.gpr();
- GPRReg leftGPR = left.gpr();
- JSValueRegs rightRegs = right.jsValueRegs();
-
- speculateString(stringEdge, leftGPR);
- speculateStringIdentAndLoadStorage(stringEdge, leftGPR, leftTempGPR);
-
- moveFalseTo(rightTempGPR);
- JITCompiler::JumpList notString;
- notString.append(m_jit.branchIfNotCell(rightRegs));
- notString.append(m_jit.branchIfNotString(rightRegs.payloadGPR()));
-
- speculateStringIdentAndLoadStorage(notStringVarEdge, rightRegs.payloadGPR(), rightTempGPR);
-
- m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, rightTempGPR);
- notString.link(&m_jit);
-
- unblessedBooleanResult(rightTempGPR, node);
+#if USE(JSVALUE64)
+ m_jit.or32(TrustedImm32(ValueFalse), leftTempGPR);
+ jsValueResult(leftTempGPR, node, DataFormatJSBoolean);
+#else
+ booleanResult(leftTempGPR, node);
+#endif
}
void SpeculativeJIT::compileStringZeroLength(Node* node)
@@ -4263,16 +3911,12 @@ void SpeculativeJIT::compileStringZeroLength(Node* node)
// Fetch the length field from the string object.
m_jit.test32(MacroAssembler::Zero, MacroAssembler::Address(strGPR, JSString::offsetOfLength()), MacroAssembler::TrustedImm32(-1), eqGPR);
- unblessedBooleanResult(eqGPR, node);
-}
-
-void SpeculativeJIT::emitStringBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
-{
- SpeculateCellOperand str(this, nodeUse);
- speculateString(nodeUse, str.gpr());
- branchTest32(JITCompiler::NonZero, MacroAssembler::Address(str.gpr(), JSString::offsetOfLength()), taken);
- jump(notTaken);
- noResult(m_currentNode);
+#if USE(JSVALUE64)
+ m_jit.or32(TrustedImm32(ValueFalse), eqGPR);
+ jsValueResult(eqGPR, node, DataFormatJSBoolean);
+#else
+ booleanResult(eqGPR, node);
+#endif
}
void SpeculativeJIT::compileConstantStoragePointer(Node* node)
@@ -4345,7 +3989,7 @@ void SpeculativeJIT::compileGetTypedArrayByteOffset(Node* node)
int32Result(vectorGPR, node);
}
-void SpeculativeJIT::compileGetByValOnDirectArguments(Node* node)
+void SpeculativeJIT::compileGetByValOnArguments(Node* node)
{
SpeculateCellOperand base(this, node->child1());
SpeculateStrictInt32Operand property(this, node->child2());
@@ -4353,134 +3997,87 @@ void SpeculativeJIT::compileGetByValOnDirectArguments(Node* node)
#if USE(JSVALUE32_64)
GPRTemporary resultTag(this);
#endif
+ GPRTemporary scratch(this);
GPRReg baseReg = base.gpr();
GPRReg propertyReg = property.gpr();
GPRReg resultReg = result.gpr();
#if USE(JSVALUE32_64)
GPRReg resultTagReg = resultTag.gpr();
- JSValueRegs resultRegs = JSValueRegs(resultTagReg, resultReg);
-#else
- JSValueRegs resultRegs = JSValueRegs(resultReg);
#endif
+ GPRReg scratchReg = scratch.gpr();
if (!m_compileOkay)
return;
+
+ ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
- ASSERT(ArrayMode(Array::DirectArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
-
+ // Two really lame checks.
speculationCheck(
- ExoticObjectMode, JSValueSource(), 0,
- m_jit.branchTestPtr(
- MacroAssembler::NonZero,
- MacroAssembler::Address(baseReg, DirectArguments::offsetOfOverrides())));
- speculationCheck(
- ExoticObjectMode, JSValueSource(), 0,
+ Uncountable, JSValueSource(), 0,
m_jit.branch32(
MacroAssembler::AboveOrEqual, propertyReg,
- MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength())));
+ MacroAssembler::Address(baseReg, Arguments::offsetOfNumArguments())));
+ speculationCheck(
+ Uncountable, JSValueSource(), 0,
+ m_jit.branchTestPtr(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(
+ baseReg, Arguments::offsetOfSlowArgumentData())));
- m_jit.loadValue(
- MacroAssembler::BaseIndex(
- baseReg, propertyReg, MacroAssembler::TimesEight, DirectArguments::storageOffset()),
- resultRegs);
+ m_jit.move(propertyReg, resultReg);
+ m_jit.signExtend32ToPtr(resultReg, resultReg);
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, Arguments::offsetOfRegisters()),
+ scratchReg);
- jsValueResult(resultRegs, node);
+#if USE(JSVALUE32_64)
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register) +
+ OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
+ resultTagReg);
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register) +
+ OBJECT_OFFSETOF(JSValue, u.asBits.payload)),
+ resultReg);
+ jsValueResult(resultTagReg, resultReg, node);
+#else
+ m_jit.load64(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register)),
+ resultReg);
+ jsValueResult(resultReg, node);
+#endif
}
-void SpeculativeJIT::compileGetByValOnScopedArguments(Node* node)
+void SpeculativeJIT::compileGetArgumentsLength(Node* node)
{
SpeculateCellOperand base(this, node->child1());
- SpeculateStrictInt32Operand property(this, node->child2());
- GPRTemporary result(this);
-#if USE(JSVALUE32_64)
- GPRTemporary resultTag(this);
-#endif
- GPRTemporary scratch(this);
- GPRTemporary scratch2(this);
+ GPRTemporary result(this, Reuse, base);
GPRReg baseReg = base.gpr();
- GPRReg propertyReg = property.gpr();
GPRReg resultReg = result.gpr();
-#if USE(JSVALUE32_64)
- GPRReg resultTagReg = resultTag.gpr();
- JSValueRegs resultRegs = JSValueRegs(resultTagReg, resultReg);
-#else
- JSValueRegs resultRegs = JSValueRegs(resultReg);
-#endif
- GPRReg scratchReg = scratch.gpr();
- GPRReg scratch2Reg = scratch2.gpr();
if (!m_compileOkay)
return;
- ASSERT(ArrayMode(Array::ScopedArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
+ ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
speculationCheck(
- ExoticObjectMode, JSValueSource(), nullptr,
- m_jit.branch32(
- MacroAssembler::AboveOrEqual, propertyReg,
- MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength())));
-
- m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTable()), scratchReg);
- m_jit.load32(
- MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfLength()), scratch2Reg);
-
- MacroAssembler::Jump overflowArgument = m_jit.branch32(
- MacroAssembler::AboveOrEqual, propertyReg, scratch2Reg);
+ Uncountable, JSValueSource(), 0,
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(baseReg, Arguments::offsetOfOverrodeLength())));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfScope()), scratch2Reg);
-
- m_jit.loadPtr(
- MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfArguments()),
- scratchReg);
m_jit.load32(
- MacroAssembler::BaseIndex(scratchReg, propertyReg, MacroAssembler::TimesFour),
- scratchReg);
-
- speculationCheck(
- ExoticObjectMode, JSValueSource(), nullptr,
- m_jit.branch32(
- MacroAssembler::Equal, scratchReg, TrustedImm32(ScopeOffset::invalidOffset)));
-
- m_jit.loadValue(
- MacroAssembler::BaseIndex(
- scratch2Reg, propertyReg, MacroAssembler::TimesEight,
- JSEnvironmentRecord::offsetOfVariables()),
- resultRegs);
-
- MacroAssembler::Jump done = m_jit.jump();
- overflowArgument.link(&m_jit);
-
- m_jit.sub32(propertyReg, scratch2Reg);
- m_jit.neg32(scratch2Reg);
-
- m_jit.loadValue(
- MacroAssembler::BaseIndex(
- baseReg, scratch2Reg, MacroAssembler::TimesEight,
- ScopedArguments::overflowStorageOffset()),
- resultRegs);
- speculationCheck(ExoticObjectMode, JSValueSource(), nullptr, m_jit.branchIfEmpty(resultRegs));
-
- done.link(&m_jit);
-
- jsValueResult(resultRegs, node);
-}
-
-void SpeculativeJIT::compileGetScope(Node* node)
-{
- SpeculateCellOperand function(this, node->child1());
- GPRTemporary result(this, Reuse, function);
- m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr());
- cellResult(result.gpr(), node);
-}
-
-void SpeculativeJIT::compileSkipScope(Node* node)
-{
- SpeculateCellOperand scope(this, node->child1());
- GPRTemporary result(this, Reuse, scope);
- m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr());
- cellResult(result.gpr(), node);
+ MacroAssembler::Address(baseReg, Arguments::offsetOfNumArguments()),
+ resultReg);
+ int32Result(resultReg, node);
}
void SpeculativeJIT::compileGetArrayLength(Node* node)
@@ -4520,52 +4117,8 @@ void SpeculativeJIT::compileGetArrayLength(Node* node)
int32Result(resultGPR, node);
break;
}
- case Array::DirectArguments: {
- SpeculateCellOperand base(this, node->child1());
- GPRTemporary result(this, Reuse, base);
-
- GPRReg baseReg = base.gpr();
- GPRReg resultReg = result.gpr();
-
- if (!m_compileOkay)
- return;
-
- ASSERT(ArrayMode(Array::DirectArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
-
- speculationCheck(
- ExoticObjectMode, JSValueSource(), 0,
- m_jit.branchTestPtr(
- MacroAssembler::NonZero,
- MacroAssembler::Address(baseReg, DirectArguments::offsetOfOverrides())));
-
- m_jit.load32(
- MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength()), resultReg);
-
- int32Result(resultReg, node);
- break;
- }
- case Array::ScopedArguments: {
- SpeculateCellOperand base(this, node->child1());
- GPRTemporary result(this, Reuse, base);
-
- GPRReg baseReg = base.gpr();
- GPRReg resultReg = result.gpr();
-
- if (!m_compileOkay)
- return;
-
- ASSERT(ArrayMode(Array::ScopedArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
-
- speculationCheck(
- ExoticObjectMode, JSValueSource(), 0,
- m_jit.branchTest8(
- MacroAssembler::NonZero,
- MacroAssembler::Address(baseReg, ScopedArguments::offsetOfOverrodeThings())));
-
- m_jit.load32(
- MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength()), resultReg);
-
- int32Result(resultReg, node);
+ case Array::Arguments: {
+ compileGetArgumentsLength(node);
break;
}
default: {
@@ -4580,467 +4133,28 @@ void SpeculativeJIT::compileGetArrayLength(Node* node)
} }
}
-void SpeculativeJIT::compileCheckIdent(Node* node)
+void SpeculativeJIT::compileNewFunctionNoCheck(Node* node)
{
- SpeculateCellOperand operand(this, node->child1());
- UniquedStringImpl* uid = node->uidOperand();
- if (uid->isSymbol()) {
- speculateSymbol(node->child1(), operand.gpr());
- speculationCheck(
- BadIdent, JSValueSource(), nullptr,
- m_jit.branchPtr(
- JITCompiler::NotEqual,
- JITCompiler::Address(operand.gpr(), Symbol::offsetOfPrivateName()),
- TrustedImmPtr(uid)));
- } else {
- speculateString(node->child1(), operand.gpr());
- speculateStringIdent(node->child1(), operand.gpr());
- speculationCheck(
- BadIdent, JSValueSource(), nullptr,
- m_jit.branchPtr(
- JITCompiler::NotEqual,
- JITCompiler::Address(operand.gpr(), JSString::offsetOfValue()),
- TrustedImmPtr(uid)));
- }
- noResult(node);
-}
-
-void SpeculativeJIT::compileNewFunction(Node* node)
-{
- SpeculateCellOperand scope(this, node->child1());
- GPRReg scopeGPR = scope.gpr();
-
- FunctionExecutable* executable = node->castOperand<FunctionExecutable*>();
-
- if (executable->singletonFunction()->isStillValid()) {
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
-
- callOperation(operationNewFunction, resultGPR, scopeGPR, executable);
- cellResult(resultGPR, node);
- return;
- }
-
- Structure* structure = m_jit.graph().globalObjectFor(
- node->origin.semantic)->functionStructure();
-
- GPRTemporary result(this);
- GPRTemporary scratch1(this);
- GPRTemporary scratch2(this);
- GPRReg resultGPR = result.gpr();
- GPRReg scratch1GPR = scratch1.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
-
- JITCompiler::JumpList slowPath;
- emitAllocateJSObjectWithKnownSize<JSFunction>(
- resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0),
- scratch1GPR, scratch2GPR, slowPath, JSFunction::allocationSize(0));
-
- // Don't need a memory barriers since we just fast-created the function, so it
- // must be young.
- m_jit.storePtr(
- scopeGPR,
- JITCompiler::Address(resultGPR, JSFunction::offsetOfScopeChain()));
- m_jit.storePtr(
- TrustedImmPtr(executable),
- JITCompiler::Address(resultGPR, JSFunction::offsetOfExecutable()));
- m_jit.storePtr(
- TrustedImmPtr(0),
- JITCompiler::Address(resultGPR, JSFunction::offsetOfRareData()));
-
-
- addSlowPathGenerator(
- slowPathCall(
- slowPath, this, operationNewFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable));
-
- cellResult(resultGPR, node);
-}
-
-void SpeculativeJIT::compileForwardVarargs(Node* node)
-{
- LoadVarargsData* data = node->loadVarargsData();
- InlineCallFrame* inlineCallFrame = node->child1()->origin.semantic.inlineCallFrame;
-
- GPRTemporary length(this);
- JSValueRegsTemporary temp(this);
- GPRReg lengthGPR = length.gpr();
- JSValueRegs tempRegs = temp.regs();
-
- emitGetLength(inlineCallFrame, lengthGPR, /* includeThis = */ true);
- if (data->offset)
- m_jit.sub32(TrustedImm32(data->offset), lengthGPR);
-
- speculationCheck(
- VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
- MacroAssembler::Above,
- lengthGPR, TrustedImm32(data->limit)));
-
- m_jit.store32(lengthGPR, JITCompiler::payloadFor(data->machineCount));
-
- VirtualRegister sourceStart = JITCompiler::argumentsStart(inlineCallFrame) + data->offset;
- VirtualRegister targetStart = data->machineStart;
-
- m_jit.sub32(TrustedImm32(1), lengthGPR);
-
- // First have a loop that fills in the undefined slots in case of an arity check failure.
- m_jit.move(TrustedImm32(data->mandatoryMinimum), tempRegs.payloadGPR());
- JITCompiler::Jump done = m_jit.branch32(JITCompiler::BelowOrEqual, tempRegs.payloadGPR(), lengthGPR);
-
- JITCompiler::Label loop = m_jit.label();
- m_jit.sub32(TrustedImm32(1), tempRegs.payloadGPR());
- m_jit.storeTrustedValue(
- jsUndefined(),
- JITCompiler::BaseIndex(
- GPRInfo::callFrameRegister, tempRegs.payloadGPR(), JITCompiler::TimesEight,
- targetStart.offset() * sizeof(EncodedJSValue)));
- m_jit.branch32(JITCompiler::Above, tempRegs.payloadGPR(), lengthGPR).linkTo(loop, &m_jit);
- done.link(&m_jit);
-
- // And then fill in the actual argument values.
- done = m_jit.branchTest32(JITCompiler::Zero, lengthGPR);
-
- loop = m_jit.label();
- m_jit.sub32(TrustedImm32(1), lengthGPR);
- m_jit.loadValue(
- JITCompiler::BaseIndex(
- GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
- sourceStart.offset() * sizeof(EncodedJSValue)),
- tempRegs);
- m_jit.storeValue(
- tempRegs,
- JITCompiler::BaseIndex(
- GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
- targetStart.offset() * sizeof(EncodedJSValue)));
- m_jit.branchTest32(JITCompiler::NonZero, lengthGPR).linkTo(loop, &m_jit);
-
- done.link(&m_jit);
-
- noResult(node);
-}
-
-void SpeculativeJIT::compileCreateActivation(Node* node)
-{
- SymbolTable* table = node->castOperand<SymbolTable*>();
- Structure* structure = m_jit.graph().globalObjectFor(
- node->origin.semantic)->activationStructure();
-
- SpeculateCellOperand scope(this, node->child1());
- GPRReg scopeGPR = scope.gpr();
- JSValue initializationValue = node->initializationValueForActivation();
- ASSERT(initializationValue == jsUndefined() || initializationValue == jsTDZValue());
-
- if (table->singletonScope()->isStillValid()) {
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
-
-#if USE(JSVALUE64)
- callOperation(operationCreateActivationDirect,
- resultGPR, structure, scopeGPR, table, TrustedImm64(JSValue::encode(initializationValue)));
-#else
- callOperation(operationCreateActivationDirect,
- resultGPR, structure, scopeGPR, table, TrustedImm32(initializationValue.tag()), TrustedImm32(initializationValue.payload()));
-#endif
- cellResult(resultGPR, node);
- return;
- }
-
- GPRTemporary result(this);
- GPRTemporary scratch1(this);
- GPRTemporary scratch2(this);
- GPRReg resultGPR = result.gpr();
- GPRReg scratch1GPR = scratch1.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
-
- JITCompiler::JumpList slowPath;
- emitAllocateJSObjectWithKnownSize<JSLexicalEnvironment>(
- resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
- slowPath, JSLexicalEnvironment::allocationSize(table));
-
- // Don't need a memory barriers since we just fast-created the activation, so the
- // activation must be young.
- m_jit.storePtr(scopeGPR, JITCompiler::Address(resultGPR, JSScope::offsetOfNext()));
- m_jit.storePtr(
- TrustedImmPtr(table),
- JITCompiler::Address(resultGPR, JSLexicalEnvironment::offsetOfSymbolTable()));
-
- // Must initialize all members to undefined or the TDZ empty value.
- for (unsigned i = 0; i < table->scopeSize(); ++i) {
- m_jit.storeTrustedValue(
- initializationValue,
- JITCompiler::Address(
- resultGPR, JSLexicalEnvironment::offsetOfVariable(ScopeOffset(i))));
- }
-
-#if USE(JSVALUE64)
- addSlowPathGenerator(
- slowPathCall(
- slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table, TrustedImm64(JSValue::encode(initializationValue))));
-#else
- addSlowPathGenerator(
- slowPathCall(
- slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table, TrustedImm32(initializationValue.tag()), TrustedImm32(initializationValue.payload())));
-#endif
-
- cellResult(resultGPR, node);
-}
-
-void SpeculativeJIT::compileCreateDirectArguments(Node* node)
-{
- // FIXME: A more effective way of dealing with the argument count and callee is to have
- // them be explicit arguments to this node.
- // https://bugs.webkit.org/show_bug.cgi?id=142207
-
- GPRTemporary result(this);
- GPRTemporary scratch1(this);
- GPRTemporary scratch2(this);
- GPRTemporary length;
- GPRReg resultGPR = result.gpr();
- GPRReg scratch1GPR = scratch1.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
- GPRReg lengthGPR = InvalidGPRReg;
- JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(scratch1GPR, scratch2GPR);
-
- unsigned minCapacity = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->numParameters() - 1;
-
- unsigned knownLength;
- bool lengthIsKnown; // if false, lengthGPR will have the length.
- if (node->origin.semantic.inlineCallFrame
- && !node->origin.semantic.inlineCallFrame->isVarargs()) {
- knownLength = node->origin.semantic.inlineCallFrame->arguments.size() - 1;
- lengthIsKnown = true;
- } else {
- knownLength = UINT_MAX;
- lengthIsKnown = false;
-
- GPRTemporary realLength(this);
- length.adopt(realLength);
- lengthGPR = length.gpr();
-
- VirtualRegister argumentCountRegister;
- if (!node->origin.semantic.inlineCallFrame)
- argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
- else
- argumentCountRegister = node->origin.semantic.inlineCallFrame->argumentCountRegister;
- m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
- m_jit.sub32(TrustedImm32(1), lengthGPR);
- }
-
- Structure* structure =
- m_jit.graph().globalObjectFor(node->origin.semantic)->directArgumentsStructure();
-
- // Use a different strategy for allocating the object depending on whether we know its
- // size statically.
- JITCompiler::JumpList slowPath;
- if (lengthIsKnown) {
- emitAllocateJSObjectWithKnownSize<DirectArguments>(
- resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
- slowPath, DirectArguments::allocationSize(std::max(knownLength, minCapacity)));
-
- m_jit.store32(
- TrustedImm32(knownLength),
- JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength()));
- } else {
- JITCompiler::Jump tooFewArguments;
- if (minCapacity) {
- tooFewArguments =
- m_jit.branch32(JITCompiler::Below, lengthGPR, TrustedImm32(minCapacity));
- }
- m_jit.lshift32(lengthGPR, TrustedImm32(3), scratch1GPR);
- m_jit.add32(TrustedImm32(DirectArguments::storageOffset()), scratch1GPR);
- if (minCapacity) {
- JITCompiler::Jump done = m_jit.jump();
- tooFewArguments.link(&m_jit);
- m_jit.move(TrustedImm32(DirectArguments::allocationSize(minCapacity)), scratch1GPR);
- done.link(&m_jit);
- }
-
- emitAllocateVariableSizedJSObject<DirectArguments>(
- resultGPR, TrustedImmPtr(structure), scratch1GPR, scratch1GPR, scratch2GPR,
- slowPath);
-
- m_jit.store32(
- lengthGPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength()));
- }
-
- m_jit.store32(
- TrustedImm32(minCapacity),
- JITCompiler::Address(resultGPR, DirectArguments::offsetOfMinCapacity()));
-
- m_jit.storePtr(
- TrustedImmPtr(0), JITCompiler::Address(resultGPR, DirectArguments::offsetOfOverrides()));
-
- if (lengthIsKnown) {
- addSlowPathGenerator(
- slowPathCall(
- slowPath, this, operationCreateDirectArguments, resultGPR, structure,
- knownLength, minCapacity));
- } else {
- auto generator = std::make_unique<CallCreateDirectArgumentsSlowPathGenerator>(
- slowPath, this, resultGPR, structure, lengthGPR, minCapacity);
- addSlowPathGenerator(WTF::move(generator));
- }
-
- if (node->origin.semantic.inlineCallFrame) {
- if (node->origin.semantic.inlineCallFrame->isClosureCall) {
- m_jit.loadPtr(
- JITCompiler::addressFor(
- node->origin.semantic.inlineCallFrame->calleeRecovery.virtualRegister()),
- scratch1GPR);
- } else {
- m_jit.move(
- TrustedImmPtr(
- node->origin.semantic.inlineCallFrame->calleeRecovery.constant().asCell()),
- scratch1GPR);
- }
- } else
- m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratch1GPR);
-
- // Don't need a memory barriers since we just fast-created the activation, so the
- // activation must be young.
- m_jit.storePtr(
- scratch1GPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfCallee()));
-
- VirtualRegister start = m_jit.argumentsStart(node->origin.semantic);
- if (lengthIsKnown) {
- for (unsigned i = 0; i < std::max(knownLength, minCapacity); ++i) {
- m_jit.loadValue(JITCompiler::addressFor(start + i), valueRegs);
- m_jit.storeValue(
- valueRegs, JITCompiler::Address(resultGPR, DirectArguments::offsetOfSlot(i)));
- }
- } else {
- JITCompiler::Jump done;
- if (minCapacity) {
- JITCompiler::Jump startLoop = m_jit.branch32(
- JITCompiler::AboveOrEqual, lengthGPR, TrustedImm32(minCapacity));
- m_jit.move(TrustedImm32(minCapacity), lengthGPR);
- startLoop.link(&m_jit);
- } else
- done = m_jit.branchTest32(MacroAssembler::Zero, lengthGPR);
- JITCompiler::Label loop = m_jit.label();
- m_jit.sub32(TrustedImm32(1), lengthGPR);
- m_jit.loadValue(
- JITCompiler::BaseIndex(
- GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
- start.offset() * static_cast<int>(sizeof(Register))),
- valueRegs);
- m_jit.storeValue(
- valueRegs,
- JITCompiler::BaseIndex(
- resultGPR, lengthGPR, JITCompiler::TimesEight,
- DirectArguments::storageOffset()));
- m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
- if (done.isSet())
- done.link(&m_jit);
- }
-
- cellResult(resultGPR, node);
-}
-
-void SpeculativeJIT::compileGetFromArguments(Node* node)
-{
- SpeculateCellOperand arguments(this, node->child1());
- JSValueRegsTemporary result(this);
-
- GPRReg argumentsGPR = arguments.gpr();
- JSValueRegs resultRegs = result.regs();
-
- m_jit.loadValue(JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())), resultRegs);
- jsValueResult(resultRegs, node);
-}
-
-void SpeculativeJIT::compilePutToArguments(Node* node)
-{
- SpeculateCellOperand arguments(this, node->child1());
- JSValueOperand value(this, node->child2());
-
- GPRReg argumentsGPR = arguments.gpr();
- JSValueRegs valueRegs = value.jsValueRegs();
-
- m_jit.storeValue(valueRegs, JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())));
- noResult(node);
-}
-
-void SpeculativeJIT::compileCreateScopedArguments(Node* node)
-{
- SpeculateCellOperand scope(this, node->child1());
- GPRReg scopeGPR = scope.gpr();
-
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
flushRegisters();
-
- // We set up the arguments ourselves, because we have the whole register file and we can
- // set them up directly into the argument registers. This also means that we don't have to
- // invent a four-argument-register shuffle.
-
- // Arguments: 0:exec, 1:structure, 2:start, 3:length, 4:callee, 5:scope
-
- // Do the scopeGPR first, since it might alias an argument register.
- m_jit.setupArgument(5, [&] (GPRReg destGPR) { m_jit.move(scopeGPR, destGPR); });
-
- // These other things could be done in any order.
- m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); });
- m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
- m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
- m_jit.setupArgument(
- 1, [&] (GPRReg destGPR) {
- m_jit.move(
- TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)->scopedArgumentsStructure()),
- destGPR);
- });
- m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); });
-
- appendCallWithExceptionCheckSetResult(operationCreateScopedArguments, resultGPR);
-
+ callOperation(
+ operationNewFunctionNoCheck, resultGPR, m_jit.codeBlock()->functionDecl(node->functionDeclIndex()));
cellResult(resultGPR, node);
}
-void SpeculativeJIT::compileCreateClonedArguments(Node* node)
+void SpeculativeJIT::compileNewFunctionExpression(Node* node)
{
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
flushRegisters();
-
- // We set up the arguments ourselves, because we have the whole register file and we can
- // set them up directly into the argument registers.
-
- // Arguments: 0:exec, 1:structure, 2:start, 3:length, 4:callee
- m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); });
- m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
- m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
- m_jit.setupArgument(
- 1, [&] (GPRReg destGPR) {
- m_jit.move(
- TrustedImmPtr(
- m_jit.globalObjectFor(node->origin.semantic)->outOfBandArgumentsStructure()),
- destGPR);
- });
- m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); });
-
- appendCallWithExceptionCheckSetResult(operationCreateClonedArguments, resultGPR);
-
+ callOperation(
+ operationNewFunctionNoCheck,
+ resultGPR,
+ m_jit.codeBlock()->functionExpr(node->functionExprIndex()));
cellResult(resultGPR, node);
}
-void SpeculativeJIT::compileNotifyWrite(Node* node)
-{
- WatchpointSet* set = node->watchpointSet();
-
- JITCompiler::Jump slowCase = m_jit.branch8(
- JITCompiler::NotEqual,
- JITCompiler::AbsoluteAddress(set->addressOfState()),
- TrustedImm32(IsInvalidated));
-
- addSlowPathGenerator(
- slowPathCall(slowCase, this, operationNotifyWrite, NoResult, set));
-
- noResult(node);
-}
-
bool SpeculativeJIT::compileRegExpExec(Node* node)
{
unsigned branchIndexInBlock = detectPeepHoleBranch();
@@ -5049,8 +4163,8 @@ bool SpeculativeJIT::compileRegExpExec(Node* node)
Node* branchNode = m_block->at(branchIndexInBlock);
ASSERT(node->adjustedRefCount() == 1);
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
bool invert = false;
if (taken == nextBlock()) {
@@ -5066,7 +4180,7 @@ bool SpeculativeJIT::compileRegExpExec(Node* node)
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
@@ -5080,128 +4194,16 @@ bool SpeculativeJIT::compileRegExpExec(Node* node)
return true;
}
-void SpeculativeJIT::compileIsObjectOrNull(Node* node)
-{
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
-
- JSValueOperand value(this, node->child1());
- JSValueRegs valueRegs = value.jsValueRegs();
-
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
-
- JITCompiler::Jump isCell = m_jit.branchIfCell(valueRegs);
-
- JITCompiler::Jump isNull = m_jit.branchIfEqual(valueRegs, jsNull());
- JITCompiler::Jump isNonNullNonCell = m_jit.jump();
-
- isCell.link(&m_jit);
- JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR());
- JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR());
-
- JITCompiler::Jump slowPath = m_jit.branchTest8(
- JITCompiler::NonZero,
- JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
- TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData));
-
- isNull.link(&m_jit);
- m_jit.move(TrustedImm32(1), resultGPR);
- JITCompiler::Jump done = m_jit.jump();
-
- isNonNullNonCell.link(&m_jit);
- isFunction.link(&m_jit);
- notObject.link(&m_jit);
- m_jit.move(TrustedImm32(0), resultGPR);
-
- addSlowPathGenerator(
- slowPathCall(
- slowPath, this, operationObjectIsObject, resultGPR, globalObject,
- valueRegs.payloadGPR()));
-
- done.link(&m_jit);
-
- unblessedBooleanResult(resultGPR, node);
-}
-
-void SpeculativeJIT::compileIsFunction(Node* node)
-{
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
-
- JSValueOperand value(this, node->child1());
- JSValueRegs valueRegs = value.jsValueRegs();
-
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
-
- JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs);
- JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR());
- JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR());
-
- JITCompiler::Jump slowPath = m_jit.branchTest8(
- JITCompiler::NonZero,
- JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
- TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData));
-
- notCell.link(&m_jit);
- notObject.link(&m_jit);
- m_jit.move(TrustedImm32(0), resultGPR);
- JITCompiler::Jump done = m_jit.jump();
-
- isFunction.link(&m_jit);
- m_jit.move(TrustedImm32(1), resultGPR);
-
- addSlowPathGenerator(
- slowPathCall(
- slowPath, this, operationObjectIsFunction, resultGPR, globalObject,
- valueRegs.payloadGPR()));
-
- done.link(&m_jit);
-
- unblessedBooleanResult(resultGPR, node);
-}
-
-void SpeculativeJIT::compileTypeOf(Node* node)
-{
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
-
- JSValueOperand value(this, node->child1());
- JSValueRegs valueRegs = value.jsValueRegs();
-
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
-
- JITCompiler::JumpList done;
- JITCompiler::Jump slowPath;
- m_jit.emitTypeOf(
- valueRegs, resultGPR,
- [&] (TypeofType type, bool fallsThrough) {
- m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.typeString(type)), resultGPR);
- if (!fallsThrough)
- done.append(m_jit.jump());
- },
- [&] (JITCompiler::Jump theSlowPath) {
- slowPath = theSlowPath;
- });
- done.link(&m_jit);
-
- addSlowPathGenerator(
- slowPathCall(
- slowPath, this, operationTypeOfObject, resultGPR, globalObject,
- valueRegs.payloadGPR()));
-
- cellResult(resultGPR, node);
-}
-
void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
{
- if (node->transition()->previous->couldHaveIndexingHeader()) {
+ if (node->structureTransitionData().previousStructure->couldHaveIndexingHeader()) {
SpeculateCellOperand base(this, node->child1());
GPRReg baseGPR = base.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity, result.gpr(), baseGPR);
storageResult(result.gpr(), node);
@@ -5214,8 +4216,8 @@ void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
GPRReg baseGPR = base.gpr();
GPRReg scratchGPR1 = scratch1.gpr();
- ASSERT(!node->transition()->previous->outOfLineCapacity());
- ASSERT(initialOutOfLineCapacity == node->transition()->next->outOfLineCapacity());
+ ASSERT(!node->structureTransitionData().previousStructure->outOfLineCapacity());
+ ASSERT(initialOutOfLineCapacity == node->structureTransitionData().newStructure->outOfLineCapacity());
JITCompiler::Jump slowPath =
emitAllocateBasicStorage(
@@ -5233,19 +4235,23 @@ void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
{
- size_t oldSize = node->transition()->previous->outOfLineCapacity() * sizeof(JSValue);
+ size_t oldSize = node->structureTransitionData().previousStructure->outOfLineCapacity() * sizeof(JSValue);
size_t newSize = oldSize * outOfLineGrowthFactor;
- ASSERT(newSize == node->transition()->next->outOfLineCapacity() * sizeof(JSValue));
+ ASSERT(newSize == node->structureTransitionData().newStructure->outOfLineCapacity() * sizeof(JSValue));
- if (node->transition()->previous->couldHaveIndexingHeader()) {
+ if (node->structureTransitionData().previousStructure->couldHaveIndexingHeader()) {
SpeculateCellOperand base(this, node->child1());
GPRReg baseGPR = base.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationReallocateButterflyToGrowPropertyStorage, result.gpr(), baseGPR, newSize / sizeof(JSValue));
+
+ MacroAssembler::Jump notNull = m_jit.branchTestPtr(MacroAssembler::NonZero, result.gpr());
+ m_jit.breakpoint();
+ notNull.link(&m_jit);
storageResult(result.gpr(), node);
return;
@@ -5289,7 +4295,7 @@ GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, Arr
return temporary.gpr();
}
-void SpeculativeJIT::compileToStringOrCallStringConstructorOnCell(Node* node)
+void SpeculativeJIT::compileToStringOnCell(Node* node)
{
SpeculateCellOperand op1(this, node->child1());
GPRReg op1GPR = op1.gpr();
@@ -5310,13 +4316,11 @@ void SpeculativeJIT::compileToStringOrCallStringConstructorOnCell(Node* node)
case StringOrStringObjectUse: {
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
-
- m_jit.load32(JITCompiler::Address(op1GPR, JSCell::structureIDOffset()), resultGPR);
- JITCompiler::Jump isString = m_jit.branchStructurePtr(
- JITCompiler::Equal,
- resultGPR,
- m_jit.vm()->stringStructure.get());
-
+
+ m_jit.loadPtr(JITCompiler::Address(op1GPR, JSCell::structureOffset()), resultGPR);
+ JITCompiler::Jump isString = m_jit.branchPtr(
+ JITCompiler::Equal, resultGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()));
+
speculateStringObjectForStructure(node->child1(), resultGPR);
m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
@@ -5333,7 +4337,7 @@ void SpeculativeJIT::compileToStringOrCallStringConstructorOnCell(Node* node)
}
case CellUse: {
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
// We flush registers instead of silent spill/fill because in this mode we
@@ -5342,17 +4346,15 @@ void SpeculativeJIT::compileToStringOrCallStringConstructorOnCell(Node* node)
flushRegisters();
JITCompiler::Jump done;
if (node->child1()->prediction() & SpecString) {
- JITCompiler::Jump needCall = m_jit.branchIfNotString(op1GPR);
+ JITCompiler::Jump needCall = m_jit.branchPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(op1GPR, JSCell::structureOffset()),
+ TrustedImmPtr(m_jit.vm()->stringStructure.get()));
m_jit.move(op1GPR, resultGPR);
done = m_jit.jump();
needCall.link(&m_jit);
}
- if (node->op() == ToString)
- callOperation(operationToStringOnCell, resultGPR, op1GPR);
- else {
- ASSERT(node->op() == CallStringConstructor);
- callOperation(operationCallStringConstructorOnCell, resultGPR, op1GPR);
- }
+ callOperation(operationToStringOnCell, resultGPR, op1GPR);
if (done.isSet())
done.link(&m_jit);
cellResult(resultGPR, node);
@@ -5406,7 +4408,7 @@ void SpeculativeJIT::compileNewStringObject(Node* node)
void SpeculativeJIT::compileNewTypedArray(Node* node)
{
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
TypedArrayType type = node->typedArrayType();
Structure* structure = globalObject->typedArrayStructure(type);
@@ -5482,28 +4484,6 @@ void SpeculativeJIT::compileNewTypedArray(Node* node)
cellResult(resultGPR, node);
}
-void SpeculativeJIT::speculateCellTypeWithoutTypeFiltering(
- Edge edge, GPRReg cellGPR, JSType jsType)
-{
- speculationCheck(
- BadType, JSValueSource::unboxedCell(cellGPR), edge,
- m_jit.branch8(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
- MacroAssembler::TrustedImm32(jsType)));
-}
-
-void SpeculativeJIT::speculateCellType(
- Edge edge, GPRReg cellGPR, SpeculatedType specType, JSType jsType)
-{
- DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(cellGPR), edge, specType,
- m_jit.branch8(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
- TrustedImm32(jsType)));
-}
-
void SpeculativeJIT::speculateInt32(Edge edge)
{
if (!needsTypeCheck(edge, SpecInt32))
@@ -5512,67 +4492,36 @@ void SpeculativeJIT::speculateInt32(Edge edge)
(SpeculateInt32Operand(this, edge)).gpr();
}
-void SpeculativeJIT::speculateNumber(Edge edge)
+void SpeculativeJIT::speculateMachineInt(Edge edge)
{
- if (!needsTypeCheck(edge, SpecBytecodeNumber))
+#if USE(JSVALUE64)
+ if (!needsTypeCheck(edge, SpecMachineInt))
return;
- JSValueOperand value(this, edge, ManualOperandSpeculation);
-#if USE(JSVALUE64)
- GPRReg gpr = value.gpr();
- typeCheck(
- JSValueRegs(gpr), edge, SpecBytecodeNumber,
- m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
-#else
- GPRReg tagGPR = value.tagGPR();
- DFG_TYPE_CHECK(
- value.jsValueRegs(), edge, ~SpecInt32,
- m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)));
- DFG_TYPE_CHECK(
- value.jsValueRegs(), edge, SpecBytecodeNumber,
- m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
-#endif
+ (SpeculateWhicheverInt52Operand(this, edge)).gpr();
+#else // USE(JSVALUE64)
+ UNUSED_PARAM(edge);
+ UNREACHABLE_FOR_PLATFORM();
+#endif // USE(JSVALUE64)
}
-void SpeculativeJIT::speculateRealNumber(Edge edge)
+void SpeculativeJIT::speculateNumber(Edge edge)
{
- if (!needsTypeCheck(edge, SpecBytecodeRealNumber))
+ if (!needsTypeCheck(edge, SpecFullNumber))
return;
- JSValueOperand op1(this, edge, ManualOperandSpeculation);
- FPRTemporary result(this);
-
- JSValueRegs op1Regs = op1.jsValueRegs();
- FPRReg resultFPR = result.fpr();
-
-#if USE(JSVALUE64)
- GPRTemporary temp(this);
- GPRReg tempGPR = temp.gpr();
- m_jit.move(op1Regs.gpr(), tempGPR);
- m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
-#else
- FPRTemporary temp(this);
- FPRReg tempFPR = temp.fpr();
- unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
-#endif
-
- JITCompiler::Jump done = m_jit.branchDouble(
- JITCompiler::DoubleEqual, resultFPR, resultFPR);
-
- typeCheck(op1Regs, edge, SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
-
- done.link(&m_jit);
+ (SpeculateDoubleOperand(this, edge)).fpr();
}
-void SpeculativeJIT::speculateDoubleRepReal(Edge edge)
+void SpeculativeJIT::speculateRealNumber(Edge edge)
{
- if (!needsTypeCheck(edge, SpecDoubleReal))
+ if (!needsTypeCheck(edge, SpecFullRealNumber))
return;
SpeculateDoubleOperand operand(this, edge);
FPRReg fpr = operand.fpr();
- typeCheck(
- JSValueRegs(), edge, SpecDoubleReal,
+ DFG_TYPE_CHECK(
+ JSValueRegs(), edge, SpecFullRealNumber,
m_jit.branchDouble(
MacroAssembler::DoubleNotEqualOrUnordered, fpr, fpr));
}
@@ -5601,16 +4550,10 @@ void SpeculativeJIT::speculateObject(Edge edge)
SpeculateCellOperand operand(this, edge);
GPRReg gpr = operand.gpr();
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchIfNotObject(gpr));
-}
-
-void SpeculativeJIT::speculateFunction(Edge edge)
-{
- if (!needsTypeCheck(edge, SpecFunction))
- return;
-
- SpeculateCellOperand operand(this, edge);
- speculateCellType(edge, operand.gpr(), SpecFunction, JSFunctionType);
+ JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(gpr, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
}
void SpeculativeJIT::speculateFinalObject(Edge edge)
@@ -5619,7 +4562,15 @@ void SpeculativeJIT::speculateFinalObject(Edge edge)
return;
SpeculateCellOperand operand(this, edge);
- speculateCellType(edge, operand.gpr(), SpecFinalObject, FinalObjectType);
+ GPRTemporary structure(this);
+ GPRReg gpr = operand.gpr();
+ GPRReg structureGPR = structure.gpr();
+ m_jit.loadPtr(MacroAssembler::Address(gpr, JSCell::structureOffset()), structureGPR);
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(gpr), edge, SpecFinalObject, m_jit.branch8(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(structureGPR, Structure::typeInfoTypeOffset()),
+ TrustedImm32(FinalObjectType)));
}
void SpeculativeJIT::speculateObjectOrOther(Edge edge)
@@ -5630,31 +4581,68 @@ void SpeculativeJIT::speculateObjectOrOther(Edge edge)
JSValueOperand operand(this, edge, ManualOperandSpeculation);
GPRTemporary temp(this);
GPRReg tempGPR = temp.gpr();
- MacroAssembler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs());
- GPRReg gpr = operand.jsValueRegs().payloadGPR();
+#if USE(JSVALUE64)
+ GPRReg gpr = operand.gpr();
+ MacroAssembler::Jump notCell = m_jit.branchTest64(
+ MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister);
DFG_TYPE_CHECK(
- operand.jsValueRegs(), edge, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(gpr));
+ JSValueRegs(gpr), edge, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(gpr, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
MacroAssembler::Jump done = m_jit.jump();
notCell.link(&m_jit);
if (needsTypeCheck(edge, SpecCell | SpecOther)) {
+ m_jit.move(gpr, tempGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), tempGPR);
+
typeCheck(
- operand.jsValueRegs(), edge, SpecCell | SpecOther,
- m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
+ JSValueRegs(gpr), edge, SpecCell | SpecOther,
+ m_jit.branch64(
+ MacroAssembler::NotEqual, tempGPR,
+ MacroAssembler::TrustedImm64(ValueNull)));
}
done.link(&m_jit);
+#else
+ GPRReg tagGPR = operand.tagGPR();
+ GPRReg payloadGPR = operand.payloadGPR();
+ MacroAssembler::Jump notCell =
+ m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag));
+ DFG_TYPE_CHECK(
+ JSValueRegs(tagGPR, payloadGPR), edge, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(payloadGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ MacroAssembler::Jump done = m_jit.jump();
+ notCell.link(&m_jit);
+ if (needsTypeCheck(edge, SpecCell | SpecOther)) {
+ m_jit.move(tagGPR, tempGPR);
+ m_jit.or32(TrustedImm32(1), tempGPR);
+
+ typeCheck(
+ JSValueRegs(tagGPR, payloadGPR), edge, SpecCell | SpecOther,
+ m_jit.branch32(
+ MacroAssembler::NotEqual, tempGPR,
+ MacroAssembler::TrustedImm32(JSValue::NullTag)));
+ }
+ done.link(&m_jit);
+#endif
}
void SpeculativeJIT::speculateString(Edge edge, GPRReg cell)
{
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(cell), edge, SpecString | ~SpecCell, m_jit.branchIfNotString(cell));
+ JSValueSource::unboxedCell(cell), edge, SpecString, m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(cell, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
}
void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage)
{
m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), storage);
- if (!needsTypeCheck(edge, SpecStringIdent | ~SpecString))
+ if (!needsTypeCheck(edge, SpecStringIdent))
return;
speculationCheck(
@@ -5664,9 +4652,9 @@ void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge, GPRReg string
BadType, JSValueSource::unboxedCell(string), edge, m_jit.branchTest32(
MacroAssembler::Zero,
MacroAssembler::Address(storage, StringImpl::flagsOffset()),
- MacroAssembler::TrustedImm32(StringImpl::flagIsAtomic())));
+ MacroAssembler::TrustedImm32(StringImpl::flagIsIdentifier())));
- m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
+ m_interpreter.filter(edge, SpecStringIdent);
}
void SpeculativeJIT::speculateStringIdent(Edge edge, GPRReg string)
@@ -5700,7 +4688,7 @@ void SpeculativeJIT::speculateString(Edge edge)
void SpeculativeJIT::speculateStringObject(Edge edge, GPRReg gpr)
{
- speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureIDOffset()));
+ speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureOffset()));
}
void SpeculativeJIT::speculateStringObject(Edge edge)
@@ -5726,52 +4714,20 @@ void SpeculativeJIT::speculateStringOrStringObject(Edge edge)
GPRReg gpr = operand.gpr();
if (!needsTypeCheck(edge, SpecString | SpecStringObject))
return;
-
- GPRTemporary structureID(this);
- GPRReg structureIDGPR = structureID.gpr();
-
- m_jit.load32(JITCompiler::Address(gpr, JSCell::structureIDOffset()), structureIDGPR);
- JITCompiler::Jump isString = m_jit.branchStructurePtr(
- JITCompiler::Equal,
- structureIDGPR,
- m_jit.vm()->stringStructure.get());
- speculateStringObjectForStructure(edge, structureIDGPR);
+ GPRTemporary structure(this);
+ GPRReg structureGPR = structure.gpr();
- isString.link(&m_jit);
-
- m_interpreter.filter(edge, SpecString | SpecStringObject);
-}
-
-void SpeculativeJIT::speculateNotStringVar(Edge edge)
-{
- JSValueOperand operand(this, edge, ManualOperandSpeculation);
- GPRTemporary temp(this);
- GPRReg tempGPR = temp.gpr();
+ m_jit.loadPtr(JITCompiler::Address(gpr, JSCell::structureOffset()), structureGPR);
- JITCompiler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs());
- GPRReg cell = operand.jsValueRegs().payloadGPR();
+ JITCompiler::Jump isString = m_jit.branchPtr(
+ JITCompiler::Equal, structureGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()));
- JITCompiler::Jump notString = m_jit.branchIfNotString(cell);
+ speculateStringObjectForStructure(edge, structureGPR);
- speculateStringIdentAndLoadStorage(edge, cell, tempGPR);
+ isString.link(&m_jit);
- notString.link(&m_jit);
- notCell.link(&m_jit);
-}
-
-void SpeculativeJIT::speculateSymbol(Edge edge, GPRReg cell)
-{
- DFG_TYPE_CHECK(JSValueSource::unboxedCell(cell), edge, SpecSymbol, m_jit.branchIfNotSymbol(cell));
-}
-
-void SpeculativeJIT::speculateSymbol(Edge edge)
-{
- if (!needsTypeCheck(edge, SpecSymbol))
- return;
-
- SpeculateCellOperand operand(this, edge);
- speculateSymbol(edge, operand.gpr());
+ m_interpreter.filter(edge, SpecString | SpecStringObject);
}
void SpeculativeJIT::speculateNotCell(Edge edge)
@@ -5779,8 +4735,18 @@ void SpeculativeJIT::speculateNotCell(Edge edge)
if (!needsTypeCheck(edge, ~SpecCell))
return;
- JSValueOperand operand(this, edge, ManualOperandSpeculation);
- typeCheck(operand.jsValueRegs(), edge, ~SpecCell, m_jit.branchIfCell(operand.jsValueRegs()));
+ JSValueOperand operand(this, edge, ManualOperandSpeculation);
+#if USE(JSVALUE64)
+ typeCheck(
+ JSValueRegs(operand.gpr()), edge, ~SpecCell,
+ m_jit.branchTest64(
+ JITCompiler::Zero, operand.gpr(), GPRInfo::tagMaskRegister));
+#else
+ typeCheck(
+ JSValueRegs(operand.tagGPR(), operand.payloadGPR()), edge, ~SpecCell,
+ m_jit.branch32(
+ JITCompiler::Equal, operand.tagGPR(), TrustedImm32(JSValue::CellTag)));
+#endif
}
void SpeculativeJIT::speculateOther(Edge edge)
@@ -5791,34 +4757,21 @@ void SpeculativeJIT::speculateOther(Edge edge)
JSValueOperand operand(this, edge, ManualOperandSpeculation);
GPRTemporary temp(this);
GPRReg tempGPR = temp.gpr();
- typeCheck(
- operand.jsValueRegs(), edge, SpecOther,
- m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
-}
-
-void SpeculativeJIT::speculateMisc(Edge edge, JSValueRegs regs)
-{
#if USE(JSVALUE64)
- DFG_TYPE_CHECK(
- regs, edge, SpecMisc,
- m_jit.branch64(MacroAssembler::Above, regs.gpr(), MacroAssembler::TrustedImm64(TagBitTypeOther | TagBitBool | TagBitUndefined)));
+ m_jit.move(operand.gpr(), tempGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), tempGPR);
+ typeCheck(
+ JSValueRegs(operand.gpr()), edge, SpecOther,
+ m_jit.branch64(
+ MacroAssembler::NotEqual, tempGPR,
+ MacroAssembler::TrustedImm64(ValueNull)));
#else
- DFG_TYPE_CHECK(
- regs, edge, ~SpecInt32,
- m_jit.branch32(MacroAssembler::Equal, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::Int32Tag)));
- DFG_TYPE_CHECK(
- regs, edge, SpecMisc,
- m_jit.branch32(MacroAssembler::Below, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::UndefinedTag)));
-#endif
-}
-
-void SpeculativeJIT::speculateMisc(Edge edge)
-{
- if (!needsTypeCheck(edge, SpecMisc))
- return;
-
- JSValueOperand operand(this, edge, ManualOperandSpeculation);
- speculateMisc(edge, operand.jsValueRegs());
+ m_jit.move(operand.tagGPR(), tempGPR);
+ m_jit.or32(TrustedImm32(1), tempGPR);
+ typeCheck(
+ JSValueRegs(operand.tagGPR(), operand.payloadGPR()), edge, SpecOther,
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(JSValue::NullTag)));
+#endif
}
void SpeculativeJIT::speculate(Node*, Edge edge)
@@ -5829,11 +4782,8 @@ void SpeculativeJIT::speculate(Node*, Edge edge)
case KnownInt32Use:
ASSERT(!needsTypeCheck(edge, SpecInt32));
break;
- case DoubleRepUse:
- ASSERT(!needsTypeCheck(edge, SpecFullDouble));
- break;
- case Int52RepUse:
- ASSERT(!needsTypeCheck(edge, SpecMachineInt));
+ case KnownNumberUse:
+ ASSERT(!needsTypeCheck(edge, SpecFullNumber));
break;
case KnownCellUse:
ASSERT(!needsTypeCheck(edge, SpecCell));
@@ -5844,38 +4794,24 @@ void SpeculativeJIT::speculate(Node*, Edge edge)
case Int32Use:
speculateInt32(edge);
break;
- case NumberUse:
- speculateNumber(edge);
+ case MachineIntUse:
+ speculateMachineInt(edge);
break;
case RealNumberUse:
speculateRealNumber(edge);
break;
- case DoubleRepRealUse:
- speculateDoubleRepReal(edge);
- break;
-#if USE(JSVALUE64)
- case MachineIntUse:
- speculateMachineInt(edge);
- break;
- case DoubleRepMachineIntUse:
- speculateDoubleRepMachineInt(edge);
+ case NumberUse:
+ speculateNumber(edge);
break;
-#endif
case BooleanUse:
speculateBoolean(edge);
break;
- case KnownBooleanUse:
- ASSERT(!needsTypeCheck(edge, SpecBoolean));
- break;
case CellUse:
speculateCell(edge);
break;
case ObjectUse:
speculateObject(edge);
break;
- case FunctionUse:
- speculateFunction(edge);
- break;
case FinalObjectUse:
speculateFinalObject(edge);
break;
@@ -5888,27 +4824,18 @@ void SpeculativeJIT::speculate(Node*, Edge edge)
case StringUse:
speculateString(edge);
break;
- case SymbolUse:
- speculateSymbol(edge);
- break;
case StringObjectUse:
speculateStringObject(edge);
break;
case StringOrStringObjectUse:
speculateStringOrStringObject(edge);
break;
- case NotStringVarUse:
- speculateNotStringVar(edge);
- break;
case NotCellUse:
speculateNotCell(edge);
break;
case OtherUse:
speculateOther(edge);
break;
- case MiscUse:
- speculateMisc(edge);
- break;
default:
RELEASE_ASSERT_NOT_REACHED();
break;
@@ -5919,11 +4846,10 @@ void SpeculativeJIT::emitSwitchIntJump(
SwitchData* data, GPRReg value, GPRReg scratch)
{
SimpleJumpTable& table = m_jit.codeBlock()->switchJumpTable(data->switchTableIndex);
- table.ensureCTITable();
m_jit.sub32(Imm32(table.min), value);
addBranch(
m_jit.branch32(JITCompiler::AboveOrEqual, value, Imm32(table.ctiOffsets.size())),
- data->fallThrough.block);
+ data->fallThrough);
m_jit.move(TrustedImmPtr(table.ctiOffsets.begin()), scratch);
m_jit.loadPtr(JITCompiler::BaseIndex(scratch, value, JITCompiler::timesPtr()), scratch);
m_jit.jump(scratch);
@@ -5957,7 +4883,7 @@ void SpeculativeJIT::emitSwitchImm(Node* node, SwitchData* data)
addBranch(
m_jit.branchTest64(
JITCompiler::Zero, valueRegs.gpr(), GPRInfo::tagTypeNumberRegister),
- data->fallThrough.block);
+ data->fallThrough);
silentSpillAllRegisters(scratch);
callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs.gpr(), data->switchTableIndex);
silentFillAllRegisters(scratch);
@@ -5971,7 +4897,7 @@ void SpeculativeJIT::emitSwitchImm(Node* node, SwitchData* data)
m_jit.branch32(
JITCompiler::AboveOrEqual, valueRegs.tagGPR(),
TrustedImm32(JSValue::LowestTag)),
- data->fallThrough.block);
+ data->fallThrough);
silentSpillAllRegisters(scratch);
callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs, data->switchTableIndex);
silentFillAllRegisters(scratch);
@@ -5995,7 +4921,7 @@ void SpeculativeJIT::emitSwitchCharStringJump(
MacroAssembler::NotEqual,
MacroAssembler::Address(value, JSString::offsetOfLength()),
TrustedImm32(1)),
- data->fallThrough.block);
+ data->fallThrough);
m_jit.loadPtr(MacroAssembler::Address(value, JSString::offsetOfValue()), scratch);
@@ -6049,9 +4975,24 @@ void SpeculativeJIT::emitSwitchChar(Node* node, SwitchData* data)
op1.use();
- addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block);
+#if USE(JSVALUE64)
+ addBranch(
+ m_jit.branchTest64(
+ MacroAssembler::NonZero, op1Regs.gpr(), GPRInfo::tagMaskRegister),
+ data->fallThrough);
+#else
+ addBranch(
+ m_jit.branch32(
+ MacroAssembler::NotEqual, op1Regs.tagGPR(), TrustedImm32(JSValue::CellTag)),
+ data->fallThrough);
+#endif
- addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block);
+ addBranch(
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op1Regs.payloadGPR(), JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())),
+ data->fallThrough);
emitSwitchCharStringJump(data, op1Regs.payloadGPR(), tempGPR);
noResult(node, UseChildrenCalledExplicitly);
@@ -6064,6 +5005,18 @@ void SpeculativeJIT::emitSwitchChar(Node* node, SwitchData* data)
}
}
+bool SpeculativeJIT::StringSwitchCase::operator<(
+ const SpeculativeJIT::StringSwitchCase& other) const
+{
+ unsigned minLength = std::min(string->length(), other.string->length());
+ for (unsigned i = 0; i < minLength; ++i) {
+ if (string->at(i) == other.string->at(i))
+ continue;
+ return string->at(i) < other.string->at(i);
+ }
+ return string->length() < other.string->length();
+}
+
namespace {
struct CharacterCase {
@@ -6094,7 +5047,7 @@ void SpeculativeJIT::emitBinarySwitchStringRecurse(
}
if (begin == end) {
- jump(data->fallThrough.block, ForceJump);
+ jump(data->fallThrough, ForceJump);
return;
}
@@ -6130,14 +5083,14 @@ void SpeculativeJIT::emitBinarySwitchStringRecurse(
dataLog("length = ", minLength, ", commonChars = ", commonChars, ", allLengthsEqual = ", allLengthsEqual, "\n");
if (!allLengthsEqual && alreadyCheckedLength < minLength)
- branch32(MacroAssembler::Below, length, Imm32(minLength), data->fallThrough.block);
+ branch32(MacroAssembler::Below, length, Imm32(minLength), data->fallThrough);
if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
- branch32(MacroAssembler::NotEqual, length, Imm32(minLength), data->fallThrough.block);
+ branch32(MacroAssembler::NotEqual, length, Imm32(minLength), data->fallThrough);
for (unsigned i = numChecked; i < commonChars; ++i) {
branch8(
MacroAssembler::NotEqual, MacroAssembler::Address(buffer, i),
- TrustedImm32(cases[begin].string->at(i)), data->fallThrough.block);
+ TrustedImm32(cases[begin].string->at(i)), data->fallThrough);
}
if (minLength == commonChars) {
@@ -6207,7 +5160,7 @@ void SpeculativeJIT::emitBinarySwitchStringRecurse(
temp, minLength, allLengthsEqual);
}
- addBranch(binarySwitch.fallThrough(), data->fallThrough.block);
+ addBranch(binarySwitch.fallThrough(), data->fallThrough);
}
void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg string)
@@ -6259,7 +5212,7 @@ void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg string)
Vector<StringSwitchCase> cases;
for (unsigned i = 0; i < data->cases.size(); ++i) {
cases.append(
- StringSwitchCase(data->cases[i].value.stringImpl(), data->cases[i].target.block));
+ StringSwitchCase(data->cases[i].value.stringImpl(), data->cases[i].target));
}
std::sort(cases.begin(), cases.end());
@@ -6295,8 +5248,8 @@ void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data)
BinarySwitch binarySwitch(tempGPR, identifierCaseValues, BinarySwitch::IntPtr);
while (binarySwitch.advance(m_jit))
- jump(data->cases[binarySwitch.caseIndex()].target.block, ForceJump);
- addBranch(binarySwitch.fallThrough(), data->fallThrough.block);
+ jump(data->cases[binarySwitch.caseIndex()].target, ForceJump);
+ addBranch(binarySwitch.fallThrough(), data->fallThrough);
noResult(node);
break;
@@ -6322,9 +5275,24 @@ void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data)
op1.use();
- addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block);
+#if USE(JSVALUE64)
+ addBranch(
+ m_jit.branchTest64(
+ MacroAssembler::NonZero, op1Regs.gpr(), GPRInfo::tagMaskRegister),
+ data->fallThrough);
+#else
+ addBranch(
+ m_jit.branch32(
+ MacroAssembler::NotEqual, op1Regs.tagGPR(), TrustedImm32(JSValue::CellTag)),
+ data->fallThrough);
+#endif
- addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block);
+ addBranch(
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op1Regs.payloadGPR(), JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())),
+ data->fallThrough);
emitSwitchStringOnString(data, op1Regs.payloadGPR());
noResult(node, UseChildrenCalledExplicitly);
@@ -6352,10 +5320,6 @@ void SpeculativeJIT::emitSwitch(Node* node)
case SwitchString: {
emitSwitchString(node, data);
return;
- }
- case SwitchCell: {
- DFG_CRASH(m_jit.graph(), node, "Bad switch kind");
- return;
} }
RELEASE_ASSERT_NOT_REACHED();
}
@@ -6377,28 +5341,84 @@ void SpeculativeJIT::linkBranches()
#if ENABLE(GGC)
void SpeculativeJIT::compileStoreBarrier(Node* node)
{
- ASSERT(node->op() == StoreBarrier);
+ switch (node->op()) {
+ case ConditionalStoreBarrier: {
+ compileBaseValueStoreBarrier(node->child1(), node->child2());
+ break;
+ }
+
+ case StoreBarrier: {
+ SpeculateCellOperand base(this, node->child1());
+ GPRTemporary scratch1(this);
+ GPRTemporary scratch2(this);
- SpeculateCellOperand base(this, node->child1());
- GPRTemporary scratch1(this);
- GPRTemporary scratch2(this);
+ writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr());
+ break;
+ }
+
+ case StoreBarrierWithNullCheck: {
+ JSValueOperand base(this, node->child1());
+ GPRTemporary scratch1(this);
+ GPRTemporary scratch2(this);
- writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr());
+#if USE(JSVALUE64)
+ JITCompiler::Jump isNull = m_jit.branchTest64(JITCompiler::Zero, base.gpr());
+ writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr());
+#else
+ JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, base.tagGPR(), TrustedImm32(JSValue::EmptyValueTag));
+ writeBarrier(base.payloadGPR(), scratch1.gpr(), scratch2.gpr());
+#endif
+ isNull.link(&m_jit);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
noResult(node);
}
+JITCompiler::Jump SpeculativeJIT::genericWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
+{
+ jit.move(owner, scratch1);
+ jit.move(owner, scratch2);
+
+ jit.andPtr(MacroAssembler::TrustedImmPtr(MarkedBlock::blockMask), scratch1);
+ jit.andPtr(MacroAssembler::TrustedImmPtr(~MarkedBlock::blockMask), scratch2);
+
+ // Shift index
+#if USE(JSVALUE64)
+ jit.rshift64(MacroAssembler::TrustedImm32(MarkedBlock::atomShiftAmount + MarkedBlock::markByteShiftAmount), scratch2);
+#else
+ jit.rshift32(MacroAssembler::TrustedImm32(MarkedBlock::atomShiftAmount + MarkedBlock::markByteShiftAmount), scratch2);
+#endif
+
+ // Emit load and branch
+ return jit.branchTest8(MacroAssembler::Zero, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfMarks()));
+}
+
+JITCompiler::Jump SpeculativeJIT::genericWriteBarrier(CCallHelpers& jit, JSCell* owner)
+{
+ MarkedBlock* block = MarkedBlock::blockFor(owner);
+ size_t markIndex = (reinterpret_cast<size_t>(owner) & ~MarkedBlock::blockMask) >> (MarkedBlock::atomShiftAmount + MarkedBlock::markByteShiftAmount);
+ uint8_t* address = reinterpret_cast<uint8_t*>(reinterpret_cast<char*>(block) + MarkedBlock::offsetOfMarks()) + markIndex;
+ return jit.branchTest8(MacroAssembler::Zero, MacroAssembler::AbsoluteAddress(address));
+}
+
void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2)
{
ASSERT(scratch1 != scratch2);
- WriteBarrierBuffer& writeBarrierBuffer = m_jit.vm()->heap.m_writeBarrierBuffer;
- m_jit.load32(writeBarrierBuffer.currentIndexAddress(), scratch2);
- JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::TrustedImm32(writeBarrierBuffer.capacity()));
+ WriteBarrierBuffer* writeBarrierBuffer = &m_jit.vm()->heap.m_writeBarrierBuffer;
+ m_jit.move(TrustedImmPtr(writeBarrierBuffer), scratch1);
+ m_jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
+ JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
m_jit.add32(TrustedImm32(1), scratch2);
- m_jit.store32(scratch2, writeBarrierBuffer.currentIndexAddress());
+ m_jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
- m_jit.move(TrustedImmPtr(writeBarrierBuffer.buffer()), scratch1);
+ m_jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
// We use an offset of -sizeof(void*) because we already added 1 to scratch2.
m_jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
@@ -6412,11 +5432,67 @@ void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPR
done.link(&m_jit);
}
+void SpeculativeJIT::storeToWriteBarrierBuffer(JSCell* cell, GPRReg scratch1, GPRReg scratch2)
+{
+ ASSERT(scratch1 != scratch2);
+ WriteBarrierBuffer* writeBarrierBuffer = &m_jit.vm()->heap.m_writeBarrierBuffer;
+ m_jit.move(TrustedImmPtr(writeBarrierBuffer), scratch1);
+ m_jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
+ JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
+
+ m_jit.add32(TrustedImm32(1), scratch2);
+ m_jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
+
+ m_jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
+ // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
+ m_jit.storePtr(TrustedImmPtr(cell), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
+
+ JITCompiler::Jump done = m_jit.jump();
+ needToFlush.link(&m_jit);
+
+ // Call C slow path
+ silentSpillAllRegisters(InvalidGPRReg);
+ callOperation(operationFlushWriteBarrierBuffer, cell);
+ silentFillAllRegisters(InvalidGPRReg);
+
+ done.link(&m_jit);
+}
+
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, GPRReg scratch1, GPRReg scratch2)
+{
+ if (Heap::isMarked(value))
+ return;
+
+ JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, ownerGPR, scratch1, scratch2);
+ storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
+ definitelyNotMarked.link(&m_jit);
+}
+
+void SpeculativeJIT::osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
+{
+ JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(jit, owner, scratch1, scratch2);
+
+ // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
+#if CPU(X86)
+ jit.subPtr(TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
+#endif
+
+ jit.setupArgumentsWithExecState(owner);
+ jit.move(TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch1);
+ jit.call(scratch1);
+
+#if CPU(X86)
+ jit.addPtr(TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
+#endif
+
+ definitelyNotMarked.link(&jit);
+}
+
void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg scratch1, GPRReg scratch2)
{
- JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
+ JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, ownerGPR, scratch1, scratch2);
storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
- ownerIsRememberedOrInEden.link(&m_jit);
+ definitelyNotMarked.link(&m_jit);
}
#else
void SpeculativeJIT::compileStoreBarrier(Node* node)
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 93d637618..3534c7b15 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGSpeculativeJIT_h
#define DFGSpeculativeJIT_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGAbstractInterpreter.h"
@@ -54,7 +56,7 @@ class SpeculateDoubleOperand;
class SpeculateCellOperand;
class SpeculateBooleanOperand;
-enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandJSValue};
+enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue};
inline GPRReg extractResult(GPRReg result) { return result; }
#if USE(JSVALUE64)
@@ -75,8 +77,6 @@ inline NoResultTag extractResult(NoResultTag) { return NoResult; }
// to propagate type information (including information that has
// only speculatively been asserted) through the dataflow.
class SpeculativeJIT {
- WTF_MAKE_FAST_ALLOCATED;
-
friend struct OSRExit;
private:
typedef JITCompiler::TrustedImm32 TrustedImm32;
@@ -119,7 +119,6 @@ public:
~SpeculativeJIT();
bool compile();
-
void createOSREntries();
void linkOSREntries(LinkBuffer&);
@@ -190,6 +189,7 @@ public:
if (spillMe.isValid()) {
#if USE(JSVALUE32_64)
GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
+ RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
if ((info.registerFormat() & DataFormatJS))
m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
@@ -264,7 +264,7 @@ public:
else if (registerFormat != DataFormatNone)
m_gprs.release(info.gpr());
#elif USE(JSVALUE32_64)
- if (registerFormat == DataFormatDouble)
+ if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble)
m_fprs.release(info.fpr());
else if (registerFormat & DataFormatJS) {
m_gprs.release(info.tagGPR());
@@ -286,15 +286,21 @@ public:
}
bool masqueradesAsUndefinedWatchpointIsStillValid()
{
- return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->origin.semantic);
+ return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->codeOrigin);
}
#if ENABLE(GGC)
void storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2);
+ void storeToWriteBarrierBuffer(JSCell*, GPRReg scratch1, GPRReg scratch2);
+ static JITCompiler::Jump genericWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2);
+ static JITCompiler::Jump genericWriteBarrier(CCallHelpers& jit, JSCell* owner);
+ static void osrWriteBarrier(CCallHelpers&, GPRReg owner, GPRReg scratch1, GPRReg scratch2);
void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2);
+ void writeBarrier(GPRReg owner, JSCell* value, GPRReg scratch1, GPRReg scratch2);
void writeBarrier(GPRReg owner, GPRReg value, Edge valueUse, GPRReg scratch1, GPRReg scratch2);
+ void writeBarrier(JSCell* owner, GPRReg value, Edge valueUse, GPRReg scratch1, GPRReg scratch2);
#endif
void compileStoreBarrier(Node*);
@@ -313,12 +319,12 @@ public:
GPRReg fillSpeculateBoolean(Edge);
GeneratedOperandType checkGeneratedTypeForToInt32(Node*);
- void addSlowPathGenerator(std::unique_ptr<SlowPathGenerator>);
+ void addSlowPathGenerator(PassOwnPtr<SlowPathGenerator>);
void runSlowPathGenerators();
void compile(Node*);
void noticeOSRBirth(Node*);
- void bail(AbortReason);
+ void bail();
void compileCurrentBlock();
void checkArgumentTypes();
@@ -455,10 +461,6 @@ public:
m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
}
#endif
- void boxDouble(FPRReg fpr, JSValueRegs regs)
- {
- m_jit.boxDouble(fpr, regs);
- }
// Spill a VirtualRegister to the JSStack.
void spill(VirtualRegister spillMe)
@@ -528,10 +530,11 @@ public:
return;
}
- case DataFormatDouble: {
+ case DataFormatDouble:
+ case DataFormatJSDouble: {
// On JSVALUE32_64 boxing a double is a no-op.
m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
- info.spill(*m_stream, spillMe, DataFormatDouble);
+ info.spill(*m_stream, spillMe, DataFormatJSDouble);
return;
}
@@ -553,7 +556,30 @@ public:
bool isKnownNotNumber(Node* node) { return !(m_state.forNode(node).m_type & SpecFullNumber); }
bool isKnownNotCell(Node* node) { return !(m_state.forNode(node).m_type & SpecCell); }
- UniquedStringImpl* identifierUID(unsigned index)
+ // Checks/accessors for constant values.
+ bool isConstant(Node* node) { return m_jit.graph().isConstant(node); }
+ bool isJSConstant(Node* node) { return m_jit.graph().isJSConstant(node); }
+ bool isInt32Constant(Node* node) { return m_jit.graph().isInt32Constant(node); }
+ bool isDoubleConstant(Node* node) { return m_jit.graph().isDoubleConstant(node); }
+ bool isNumberConstant(Node* node) { return m_jit.graph().isNumberConstant(node); }
+ bool isBooleanConstant(Node* node) { return m_jit.graph().isBooleanConstant(node); }
+ bool isFunctionConstant(Node* node) { return m_jit.graph().isFunctionConstant(node); }
+ int32_t valueOfInt32Constant(Node* node) { return m_jit.graph().valueOfInt32Constant(node); }
+ double valueOfNumberConstant(Node* node) { return m_jit.graph().valueOfNumberConstant(node); }
+#if USE(JSVALUE32_64)
+ void* addressOfDoubleConstant(Node* node) { return m_jit.addressOfDoubleConstant(node); }
+#endif
+ JSValue valueOfJSConstant(Node* node) { return m_jit.graph().valueOfJSConstant(node); }
+ bool valueOfBooleanConstant(Node* node) { return m_jit.graph().valueOfBooleanConstant(node); }
+ JSFunction* valueOfFunctionConstant(Node* node) { return m_jit.graph().valueOfFunctionConstant(node); }
+ bool isNullConstant(Node* node)
+ {
+ if (!isConstant(node))
+ return false;
+ return valueOfJSConstant(node).isNull();
+ }
+
+ StringImpl* identifierUID(unsigned index)
{
return m_jit.graph().identifiers()[index];
}
@@ -575,6 +601,7 @@ public:
}
}
+#ifndef NDEBUG
// Used to ASSERT flushRegisters() has been called prior to
// calling out from JIT code to a C helper function.
bool isFlushed()
@@ -589,11 +616,12 @@ public:
}
return true;
}
+#endif
#if USE(JSVALUE64)
- static MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
+ MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
{
- return MacroAssembler::Imm64(JSValue::encode(node->asJSValue()));
+ return MacroAssembler::Imm64(JSValue::encode(valueOfJSConstant(node)));
}
#endif
@@ -678,7 +706,7 @@ public:
}
// Check if the lastNode is a branch on this node.
- Node* lastNode = m_block->terminal();
+ Node* lastNode = m_block->last();
return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? m_block->size() - 1 : UINT_MAX;
}
@@ -687,10 +715,10 @@ public:
#if USE(JSVALUE64)
void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
- void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
- void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif
void compileIn(Node*);
@@ -709,11 +737,59 @@ public:
void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false);
bool nonSpeculativeStrictEq(Node*, bool invert = false);
- void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg, GPRReg scratch2Reg);
+ void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg);
void compileInstanceOf(Node*);
+ ptrdiff_t calleeFrameOffset(int numArgs)
+ {
+ return virtualRegisterForLocal(m_jit.graph().m_nextMachineLocal + JSStack::CallFrameHeaderSize + numArgs).offset() * sizeof(Register);
+ }
+
+ // Access to our fixed callee CallFrame.
+ MacroAssembler::Address calleeFrameSlot(int numArgs, int slot)
+ {
+ return MacroAssembler::Address(GPRInfo::callFrameRegister, calleeFrameOffset(numArgs) + sizeof(Register) * slot);
+ }
+
+ // Access to our fixed callee CallFrame.
+ MacroAssembler::Address calleeArgumentSlot(int numArgs, int argument)
+ {
+ return calleeFrameSlot(numArgs, virtualRegisterForArgument(argument).offset());
+ }
+
+ MacroAssembler::Address calleeFrameTagSlot(int numArgs, int slot)
+ {
+ return calleeFrameSlot(numArgs, slot).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+
+ MacroAssembler::Address calleeFramePayloadSlot(int numArgs, int slot)
+ {
+ return calleeFrameSlot(numArgs, slot).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
+ MacroAssembler::Address calleeArgumentTagSlot(int numArgs, int argument)
+ {
+ return calleeArgumentSlot(numArgs, argument).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+
+ MacroAssembler::Address calleeArgumentPayloadSlot(int numArgs, int argument)
+ {
+ return calleeArgumentSlot(numArgs, argument).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
+ MacroAssembler::Address calleeFrameCallerFrame(int numArgs)
+ {
+ return calleeFrameSlot(numArgs, 0).withOffset(CallFrame::callerFrameOffset());
+ }
+
void emitCall(Node*);
+ int32_t framePointerOffsetToGetActivationRegisters()
+ {
+ return m_jit.codeBlock()->framePointerOffsetToGetActivationRegisters(
+ m_jit.graph().m_machineCaptureStart);
+ }
+
// Called once a node has completed code generation but prior to setting
// its result, to free up its children. (This must happen prior to setting
// the nodes result, since the node may have the same VirtualRegister as
@@ -784,20 +860,15 @@ public:
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
info.initCell(node, node->refCount(), reg);
}
- void blessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
- {
-#if USE(JSVALUE64)
- jsValueResult(reg, node, DataFormatJSBoolean, mode);
-#else
- booleanResult(reg, node, mode);
-#endif
- }
- void unblessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
+ void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
{
-#if USE(JSVALUE64)
- blessBoolean(reg);
-#endif
- blessedBooleanResult(reg, node, mode);
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node->virtualRegister();
+ m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+ info.initBoolean(node, node->refCount(), reg);
}
#if USE(JSVALUE64)
void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
@@ -818,16 +889,6 @@ public:
jsValueResult(reg, node, DataFormatJS, mode);
}
#elif USE(JSVALUE32_64)
- void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
- {
- if (mode == CallUseChildren)
- useChildren(node);
-
- VirtualRegister virtualRegister = node->virtualRegister();
- m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
- GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
- info.initBoolean(node, node->refCount(), reg);
- }
void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
{
if (mode == CallUseChildren)
@@ -844,14 +905,6 @@ public:
jsValueResult(tag, payload, node, DataFormatJS, mode);
}
#endif
- void jsValueResult(JSValueRegs regs, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
- {
-#if USE(JSVALUE64)
- jsValueResult(regs.gpr(), node, format, mode);
-#else
- jsValueResult(regs.tagGPR(), regs.payloadGPR(), node, format, mode);
-#endif
- }
void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
{
if (mode == CallUseChildren)
@@ -874,7 +927,7 @@ public:
}
void initConstantInfo(Node* node)
{
- ASSERT(node->hasConstant());
+ ASSERT(isInt32Constant(node) || isNumberConstant(node) || isJSConstant(node));
generationInfo(node).initConstant(node, node->refCount());
}
@@ -884,11 +937,6 @@ public:
// machine registers, and delegate the calling convention specific
// decision as to how to fill the regsiters to setupArguments* methods.
- JITCompiler::Call callOperation(V_JITOperation_E operation)
- {
- m_jit.setupArgumentsExecState();
- return appendCallWithExceptionCheck(operation);
- }
JITCompiler::Call callOperation(P_JITOperation_E operation, GPRReg result)
{
m_jit.setupArgumentsExecState();
@@ -979,17 +1027,7 @@ public:
m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(C_JITOperation_ECZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(C_JITOperation_ECZC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(C_JITOperation_EJscC operation, GPRReg result, GPRReg arg1, JSCell* cell)
+ JITCompiler::Call callOperation(C_JITOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
{
m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
@@ -1004,40 +1042,6 @@ public:
m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
return appendCallWithExceptionCheckSetResult(operation, result);
}
-
-#if USE(JSVALUE64)
- JITCompiler::Call callOperation(C_JITOperation_EStJscSymtabJ operation, GPRReg result, Structure* structure, GPRReg scope, SymbolTable* table, TrustedImm64 initialValue)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), scope, TrustedImmPtr(table), initialValue);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
-#else
- JITCompiler::Call callOperation(C_JITOperation_EStJscSymtabJ operation, GPRReg result, Structure* structure, GPRReg scope, SymbolTable* table, TrustedImm32 tag, TrustedImm32 payload)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), scope, TrustedImmPtr(table), payload, tag);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
-#endif
- JITCompiler::Call callOperation(C_JITOperation_EStZ operation, GPRReg result, Structure* structure, unsigned knownLength)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(knownLength));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(C_JITOperation_EStZZ operation, GPRReg result, Structure* structure, unsigned knownLength, unsigned minCapacity)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(knownLength), TrustedImm32(minCapacity));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(C_JITOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg length)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), length);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(C_JITOperation_EStZZ operation, GPRReg result, Structure* structure, GPRReg length, unsigned minCapacity)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), length, TrustedImm32(minCapacity));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
JITCompiler::Call callOperation(C_JITOperation_EJssSt operation, GPRReg result, GPRReg arg1, Structure* structure)
{
m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(structure));
@@ -1060,18 +1064,6 @@ public:
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(S_JITOperation_EGC operation, GPRReg result, JSGlobalObject* globalObject, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(globalObject), arg2);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
-
- JITCompiler::Call callOperation(C_JITOperation_EGC operation, GPRReg result, JSGlobalObject* globalObject, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(globalObject), arg2);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
-
JITCompiler::Call callOperation(Jss_JITOperation_EZ operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
@@ -1122,6 +1114,12 @@ public:
return appendCallWithExceptionCheck(operation);
}
+ JITCompiler::Call callOperation(V_JITOperation_EVws operation, VariableWatchpointSet* watchpointSet)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(watchpointSet));
+ return appendCall(operation);
+ }
+
JITCompiler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb operation, void* pointer)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
@@ -1133,17 +1131,7 @@ public:
m_jit.setupArgumentsExecState();
return appendCallWithCallFrameRollbackOnExceptionSetResult(operation, result);
}
- JITCompiler::Call callOperation(Z_JITOperation_EC operation, GPRReg result, GPRReg arg1)
- {
- m_jit.setupArgumentsWithExecState(arg1);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- template<typename FunctionType>
- JITCompiler::Call callOperation(FunctionType operation, NoResultTag)
- {
- return callOperation(operation);
- }
template<typename FunctionType, typename ArgumentType1>
JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1)
{
@@ -1185,16 +1173,11 @@ public:
m_jit.setupArguments(arg1, arg2);
return appendCallSetResult(operation, result);
}
- JITCompiler::Call callOperation(T_JITOperation_EJss operation, GPRReg result, GPRReg arg1)
+ JITCompiler::Call callOperation(I_JITOperation_EJss operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(C_JITOperation_EJscZ operation, GPRReg result, GPRReg arg1, int32_t arg2)
- {
- m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
JITCompiler::Call callOperation(C_JITOperation_EZ operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
@@ -1206,18 +1189,6 @@ public:
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_JITOperation_EJscC operation, GPRReg result, GPRReg arg1, JSCell* cell)
- {
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
-
- JITCompiler::Call callOperation(V_JITOperation_EWs operation, WatchpointSet* watchpointSet)
- {
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(watchpointSet));
- return appendCall(operation);
- }
-
#if USE(JSVALUE64)
JITCompiler::Call callOperation(J_JITOperation_E operation, GPRReg result)
{
@@ -1236,17 +1207,7 @@ public:
m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
return call;
}
- JITCompiler::Call callOperation(Q_JITOperation_J operation, GPRReg result, GPRReg value)
- {
- m_jit.setupArguments(value);
- return appendCallSetResult(operation, result);
- }
- JITCompiler::Call callOperation(Q_JITOperation_D operation, GPRReg result, FPRReg value)
- {
- m_jit.setupArguments(value);
- return appendCallSetResult(operation, result);
- }
- JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg result, UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg result, StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, result);
@@ -1286,17 +1247,12 @@ public:
m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_JITOperation_ECZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, const UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, const StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, result);
@@ -1306,16 +1262,6 @@ public:
m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_JITOperation_EJC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(J_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
@@ -1364,21 +1310,6 @@ public:
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(C_JITOperation_EJJC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(C_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(C_JITOperation_EJZC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
JITCompiler::Call callOperation(S_JITOperation_J operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArguments(arg1);
@@ -1451,7 +1382,7 @@ public:
m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1, GPRReg arg2, UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1, GPRReg arg2, StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, arg2, TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
@@ -1484,26 +1415,6 @@ public:
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(Z_JITOperation_EJZZ operation, GPRReg result, GPRReg arg1, unsigned arg2, unsigned arg3)
- {
- m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2), TrustedImm32(arg3));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(F_JITOperation_EFJZZ operation, GPRReg result, GPRReg arg1, GPRReg arg2, unsigned arg3, GPRReg arg4)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImm32(arg3), arg4);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(Z_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, unsigned arg2)
- {
- m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(V_JITOperation_EZJZZZ operation, unsigned arg1, GPRReg arg2, unsigned arg3, GPRReg arg4, unsigned arg5)
- {
- m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2, TrustedImm32(arg3), arg4, TrustedImm32(arg5));
- return appendCallWithExceptionCheck(operation);
- }
#else // USE(JSVALUE32_64)
// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]).
@@ -1551,7 +1462,7 @@ public:
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
@@ -1566,16 +1477,6 @@ public:
m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg resultPayload, GPRReg resultTag, GPRReg arg1)
- {
- m_jit.setupArgumentsWithExecState(arg1);
- return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
- }
- JITCompiler::Call callOperation(J_JITOperation_EJC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
- return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
- }
JITCompiler::Call callOperation(J_JITOperation_EJssZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
@@ -1607,27 +1508,17 @@ public:
m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_JITOperation_ECZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2);
- return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
- }
- JITCompiler::Call callOperation(J_JITOperation_EJscC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, JSCell* cell)
- {
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
- return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
- }
- JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1, const UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1, const StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
@@ -1727,11 +1618,6 @@ public:
m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Payload)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2Payload, MacroAssembler::TrustedImm32(JSValue::CellTag));
- return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
- }
JITCompiler::Call callOperation(J_JITOperation_ECJ operation, JSValueRegs result, GPRReg arg1, JSValueRegs arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2.payloadGPR(), arg2.tagGPR());
@@ -1751,7 +1637,7 @@ public:
JITCompiler::Call callOperation(V_JITOperation_EJ operation, GPRReg arg1Tag, GPRReg arg1Payload)
{
- m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
+ m_jit.setupArgumentsWithExecState(arg1Tag, arg1Payload);
return appendCallWithExceptionCheck(operation);
}
@@ -1760,7 +1646,7 @@ public:
m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Payload, UniquedStringImpl* uid)
+ JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Payload, StringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, arg2Payload, TrustedImm32(JSValue::CellTag), TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
@@ -1794,26 +1680,6 @@ public:
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(Z_JITOperation_EJZZ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, unsigned arg2, unsigned arg3)
- {
- m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2), TrustedImm32(arg3));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(F_JITOperation_EFJZZ operation, GPRReg result, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, unsigned arg3, GPRReg arg4)
- {
- m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(Z_JITOperation_EJZ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, unsigned arg2)
- {
- m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2));
- return appendCallWithExceptionCheckSetResult(operation, result);
- }
- JITCompiler::Call callOperation(V_JITOperation_EZJZZZ operation, unsigned arg1, GPRReg arg2Tag, GPRReg arg2Payload, unsigned arg3, GPRReg arg4, unsigned arg5)
- {
- m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2Payload, arg2Tag, TrustedImm32(arg3), arg4, TrustedImm32(arg5));
- return appendCallWithExceptionCheck(operation);
- }
#undef EABI_32BIT_DUMMY_ARG
#undef SH4_32BIT_DUMMY_ARG
@@ -1888,7 +1754,7 @@ public:
JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
{
prepareForExternalCall();
- m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
+ m_jit.emitStoreCodeOrigin(m_currentNode->codeOrigin);
JITCompiler::Call call = m_jit.appendCall(function);
m_jit.exceptionCheck();
return call;
@@ -1896,7 +1762,7 @@ public:
JITCompiler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr& function)
{
prepareForExternalCall();
- m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
+ m_jit.emitStoreCodeOrigin(m_currentNode->codeOrigin);
JITCompiler::Call call = m_jit.appendCall(function);
m_jit.exceptionCheckWithCallFrameRollback();
return call;
@@ -1918,7 +1784,7 @@ public:
JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result)
{
prepareForExternalCall();
- m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
+ m_jit.emitStoreCodeOrigin(m_currentNode->codeOrigin);
JITCompiler::Call call = m_jit.appendCall(function);
if (result != InvalidGPRReg)
m_jit.move(GPRInfo::returnValueGPR, result);
@@ -1927,7 +1793,7 @@ public:
JITCompiler::Call appendCall(const FunctionPtr& function)
{
prepareForExternalCall();
- m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
+ m_jit.emitStoreCodeOrigin(m_currentNode->codeOrigin);
return m_jit.appendCall(function);
}
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2)
@@ -2081,6 +1947,17 @@ public:
void dump(const char* label = 0);
+ bool isInteger(Node* node)
+ {
+ if (node->hasInt32Result())
+ return true;
+
+ if (isInt32Constant(node))
+ return true;
+
+ return generationInfo(node).isJSInt32();
+ }
+
bool betterUseStrictInt52(Node* node)
{
return !generationInfo(node).isInt52();
@@ -2097,27 +1974,16 @@ public:
void compilePeepHoleBooleanBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
void compilePeepHoleDoubleBranch(Node*, Node* branchNode, JITCompiler::DoubleCondition);
void compilePeepHoleObjectEquality(Node*, Node* branchNode);
- void compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode);
void compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode);
void compileObjectEquality(Node*);
- void compileObjectStrictEquality(Edge objectChild, Edge otherChild);
void compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild);
void compileObjectOrOtherLogicalNot(Edge value);
void compileLogicalNot(Node*);
- void compileStringEquality(
- Node*, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR,
- GPRReg leftTempGPR, GPRReg rightTempGPR, GPRReg leftTemp2GPR,
- GPRReg rightTemp2GPR, JITCompiler::JumpList fastTrue,
- JITCompiler::JumpList fastSlow);
void compileStringEquality(Node*);
void compileStringIdentEquality(Node*);
- void compileStringToUntypedEquality(Node*, Edge stringEdge, Edge untypedEdge);
- void compileStringIdentToNotStringVarEquality(Node*, Edge stringEdge, Edge notStringVarEdge);
void compileStringZeroLength(Node*);
- void compileMiscStrictEq(Node*);
void emitObjectOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
- void emitStringBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
void emitBranch(Node*);
struct StringSwitchCase {
@@ -2129,10 +1995,7 @@ public:
{
}
- bool operator<(const StringSwitchCase& other) const
- {
- return stringLessThan(*string, *other.string);
- }
+ bool operator<(const StringSwitchCase& other) const;
StringImpl* string;
BasicBlock* target;
@@ -2150,7 +2013,7 @@ public:
void emitSwitchString(Node*, SwitchData*);
void emitSwitch(Node*);
- void compileToStringOrCallStringConstructorOnCell(Node*);
+ void compileToStringOnCell(Node*);
void compileNewStringObject(Node*);
void compileNewTypedArray(Node*);
@@ -2160,6 +2023,8 @@ public:
void compileBooleanCompare(Node*, MacroAssembler::RelationalCondition);
void compileDoubleCompare(Node*, MacroAssembler::DoubleCondition);
+ bool compileStrictEqForConstant(Node*, Edge value, JSValue constant);
+
bool compileStrictEq(Node*);
void compileAllocatePropertyStorage(Node*);
@@ -2184,34 +2049,22 @@ public:
void compileGetByValOnString(Node*);
void compileFromCharCode(Node*);
- void compileGetByValOnDirectArguments(Node*);
- void compileGetByValOnScopedArguments(Node*);
+ void compileGetByValOnArguments(Node*);
+ void compileGetArgumentsLength(Node*);
- void compileGetScope(Node*);
- void compileSkipScope(Node*);
-
void compileGetArrayLength(Node*);
-
- void compileCheckIdent(Node*);
-
- void compileValueRep(Node*);
- void compileDoubleRep(Node*);
void compileValueToInt32(Node*);
void compileUInt32ToNumber(Node*);
void compileDoubleAsInt32(Node*);
+ void compileInt32ToDouble(Node*);
void compileAdd(Node*);
void compileMakeRope(Node*);
- void compileArithClz32(Node*);
void compileArithSub(Node*);
void compileArithNegate(Node*);
void compileArithMul(Node*);
void compileArithDiv(Node*);
void compileArithMod(Node*);
- void compileArithPow(Node*);
- void compileArithRound(Node*);
- void compileArithSqrt(Node*);
- void compileArithLog(Node*);
void compileConstantStoragePointer(Node*);
void compileGetIndexedPropertyStorage(Node*);
JITCompiler::Jump jumpForTypedArrayOutOfBounds(Node*, GPRReg baseGPR, GPRReg indexGPR);
@@ -2221,23 +2074,9 @@ public:
void compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
void compileGetByValOnFloatTypedArray(Node*, TypedArrayType);
void compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
- void compileNewFunction(Node*);
- void compileForwardVarargs(Node*);
- void compileCreateActivation(Node*);
- void compileCreateDirectArguments(Node*);
- void compileGetFromArguments(Node*);
- void compilePutToArguments(Node*);
- void compileCreateScopedArguments(Node*);
- void compileCreateClonedArguments(Node*);
- void compileNotifyWrite(Node*);
+ void compileNewFunctionNoCheck(Node*);
+ void compileNewFunctionExpression(Node*);
bool compileRegExpExec(Node*);
- void compileIsObjectOrNull(Node*);
- void compileIsFunction(Node*);
- void compileTypeOf(Node*);
-
- void moveTrueTo(GPRReg);
- void moveFalseTo(GPRReg);
- void blessBoolean(GPRReg);
// size can be an immediate or a register, and must be in bytes. If size is a register,
// it must be a different register than resultGPR. Emits code that place a pointer to
@@ -2251,7 +2090,7 @@ public:
#ifndef NDEBUG
m_jit.move(size, resultGPR);
MacroAssembler::Jump nonZeroSize = m_jit.branchTest32(MacroAssembler::NonZero, resultGPR);
- m_jit.abortWithReason(DFGBasicStorageAllocatorZeroSize);
+ m_jit.breakpoint();
nonZeroSize.link(&m_jit);
#endif
@@ -2263,7 +2102,7 @@ public:
return slowPath;
}
-
+
// Allocator for a cell of a specific size.
template <typename StructureType> // StructureType can be GPR or ImmPtr.
void emitAllocateJSCell(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
@@ -2278,7 +2117,7 @@ public:
m_jit.storePtr(scratchGPR, MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()));
// Initialize the object's Structure.
- m_jit.emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR);
+ m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSCell::structureOffset()));
}
// Allocator for an object of a specific size.
@@ -2292,69 +2131,30 @@ public:
m_jit.storePtr(storage, MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()));
}
- template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
- void emitAllocateJSObjectWithKnownSize(
- GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
- GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath, size_t size)
- {
- MarkedAllocator* allocator = &m_jit.vm()->heap.allocatorForObjectOfType<ClassType>(size);
- m_jit.move(TrustedImmPtr(allocator), scratchGPR1);
- emitAllocateJSObject(resultGPR, scratchGPR1, structure, storage, scratchGPR2, slowPath);
- }
-
- // Convenience allocator for a built-in object.
+ // Convenience allocator for a buit-in object.
template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage,
GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
{
- emitAllocateJSObjectWithKnownSize<ClassType>(
- resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath,
- ClassType::allocationSize(0));
- }
-
- template <typename ClassType, typename StructureType> // StructureType and StorageType can be GPR or ImmPtr.
- void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
- {
- static_assert(!(MarkedSpace::preciseStep & (MarkedSpace::preciseStep - 1)), "MarkedSpace::preciseStep must be a power of two.");
- static_assert(!(MarkedSpace::impreciseStep & (MarkedSpace::impreciseStep - 1)), "MarkedSpace::impreciseStep must be a power of two.");
-
- MarkedSpace::Subspace& subspace = m_jit.vm()->heap.subspaceForObjectOfType<ClassType>();
- m_jit.add32(TrustedImm32(MarkedSpace::preciseStep - 1), allocationSize);
- MacroAssembler::Jump notSmall = m_jit.branch32(MacroAssembler::AboveOrEqual, allocationSize, TrustedImm32(MarkedSpace::preciseCutoff));
- m_jit.rshift32(allocationSize, TrustedImm32(getLSBSet(MarkedSpace::preciseStep)), scratchGPR1);
- m_jit.mul32(TrustedImm32(sizeof(MarkedAllocator)), scratchGPR1, scratchGPR1);
- m_jit.addPtr(MacroAssembler::TrustedImmPtr(&subspace.preciseAllocators[0]), scratchGPR1);
-
- MacroAssembler::Jump selectedSmallSpace = m_jit.jump();
- notSmall.link(&m_jit);
- slowPath.append(m_jit.branch32(MacroAssembler::AboveOrEqual, allocationSize, TrustedImm32(MarkedSpace::impreciseCutoff)));
- m_jit.rshift32(allocationSize, TrustedImm32(getLSBSet(MarkedSpace::impreciseStep)), scratchGPR1);
- m_jit.mul32(TrustedImm32(sizeof(MarkedAllocator)), scratchGPR1, scratchGPR1);
- m_jit.addPtr(MacroAssembler::TrustedImmPtr(&subspace.impreciseAllocators[0]), scratchGPR1);
-
- selectedSmallSpace.link(&m_jit);
-
- emitAllocateJSObject(resultGPR, scratchGPR1, structure, TrustedImmPtr(0), scratchGPR2, slowPath);
- }
-
- template <typename T>
- void emitAllocateDestructibleObject(GPRReg resultGPR, Structure* structure,
- GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
- {
- emitAllocateJSObject<T>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
- m_jit.storePtr(TrustedImmPtr(structure->classInfo()), MacroAssembler::Address(resultGPR, JSDestructibleObject::classInfoOffset()));
+ MarkedAllocator* allocator = 0;
+ size_t size = ClassType::allocationSize(0);
+ if (ClassType::needsDestruction && ClassType::hasImmortalStructure)
+ allocator = &m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(size);
+ else if (ClassType::needsDestruction)
+ allocator = &m_jit.vm()->heap.allocatorForObjectWithNormalDestructor(size);
+ else
+ allocator = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(size);
+ m_jit.move(TrustedImmPtr(allocator), scratchGPR1);
+ emitAllocateJSObject(resultGPR, scratchGPR1, structure, storage, scratchGPR2, slowPath);
}
void emitAllocateJSArray(GPRReg resultGPR, Structure*, GPRReg storageGPR, unsigned numElements);
-
- void emitGetLength(InlineCallFrame*, GPRReg lengthGPR, bool includeThis = false);
- void emitGetLength(CodeOrigin, GPRReg lengthGPR, bool includeThis = false);
- void emitGetCallee(CodeOrigin, GPRReg calleeGPR);
- void emitGetArgumentStart(CodeOrigin, GPRReg startGPR);
-
- // Generate an OSR exit fuzz check. Returns Jump() if OSR exit fuzz is not enabled, or if
- // it's in training mode.
- MacroAssembler::Jump emitOSRExitFuzzCheck();
+
+#if USE(JSVALUE64)
+ JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp);
+#elif USE(JSVALUE32_64)
+ JITCompiler::Jump convertToDouble(JSValueOperand&, FPRReg result);
+#endif
// Add a speculation check.
void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
@@ -2378,23 +2178,14 @@ public:
// Helpers for performing type checks on an edge stored in the given registers.
bool needsTypeCheck(Edge edge, SpeculatedType typesPassedThrough) { return m_interpreter.needsTypeCheck(edge, typesPassedThrough); }
void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail);
-
- void speculateCellTypeWithoutTypeFiltering(Edge, GPRReg cellGPR, JSType);
- void speculateCellType(Edge, GPRReg cellGPR, SpeculatedType, JSType);
-
+
void speculateInt32(Edge);
-#if USE(JSVALUE64)
- void convertMachineInt(Edge, GPRReg resultGPR);
void speculateMachineInt(Edge);
- void speculateDoubleRepMachineInt(Edge);
-#endif // USE(JSVALUE64)
void speculateNumber(Edge);
void speculateRealNumber(Edge);
- void speculateDoubleRepReal(Edge);
void speculateBoolean(Edge);
void speculateCell(Edge);
void speculateObject(Edge);
- void speculateFunction(Edge);
void speculateFinalObject(Edge);
void speculateObjectOrOther(Edge);
void speculateString(Edge edge, GPRReg cell);
@@ -2402,18 +2193,13 @@ public:
void speculateStringIdent(Edge edge, GPRReg string);
void speculateStringIdent(Edge);
void speculateString(Edge);
- void speculateNotStringVar(Edge);
template<typename StructureLocationType>
void speculateStringObjectForStructure(Edge, StructureLocationType);
void speculateStringObject(Edge, GPRReg);
void speculateStringObject(Edge);
void speculateStringOrStringObject(Edge);
- void speculateSymbol(Edge, GPRReg cell);
- void speculateSymbol(Edge);
void speculateNotCell(Edge);
void speculateOther(Edge);
- void speculateMisc(Edge, JSValueRegs);
- void speculateMisc(Edge);
void speculate(Node*, Edge);
JITCompiler::Jump jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode, IndexingType);
@@ -2464,7 +2250,6 @@ public:
// The current node being generated.
BasicBlock* m_block;
Node* m_currentNode;
- NodeType m_lastGeneratedNode;
bool m_canExit;
unsigned m_indexInBlock;
// Virtual and physical register maps.
@@ -2497,7 +2282,7 @@ public:
bool m_isCheckingArgumentTypes;
- Vector<std::unique_ptr<SlowPathGenerator>, 8> m_slowPathGenerators;
+ Vector<OwnPtr<SlowPathGenerator>, 8> m_slowPathGenerators;
Vector<SilentRegisterSavePlan> m_plans;
};
@@ -2742,23 +2527,6 @@ private:
GPRReg m_gpr;
};
-class JSValueRegsTemporary {
-public:
- JSValueRegsTemporary();
- JSValueRegsTemporary(SpeculativeJIT*);
- ~JSValueRegsTemporary();
-
- JSValueRegs regs();
-
-private:
-#if USE(JSVALUE64)
- GPRTemporary m_gpr;
-#else
- GPRTemporary m_payloadGPR;
- GPRTemporary m_tagGPR;
-#endif
-};
-
class FPRTemporary {
public:
FPRTemporary(SpeculativeJIT*);
@@ -2796,18 +2564,18 @@ private:
//
// These classes lock the result of a call to a C++ helper function.
-class GPRFlushedCallResult : public GPRTemporary {
+class GPRResult : public GPRTemporary {
public:
- GPRFlushedCallResult(SpeculativeJIT* jit)
+ GPRResult(SpeculativeJIT* jit)
: GPRTemporary(jit, GPRInfo::returnValueGPR)
{
}
};
#if USE(JSVALUE32_64)
-class GPRFlushedCallResult2 : public GPRTemporary {
+class GPRResult2 : public GPRTemporary {
public:
- GPRFlushedCallResult2(SpeculativeJIT* jit)
+ GPRResult2(SpeculativeJIT* jit)
: GPRTemporary(jit, GPRInfo::returnValueGPR2)
{
}
@@ -2948,12 +2716,12 @@ private:
// Gives you a canonical Int52 (i.e. it's left-shifted by 16, low bits zero).
class SpeculateInt52Operand {
public:
- explicit SpeculateInt52Operand(SpeculativeJIT* jit, Edge edge)
+ explicit SpeculateInt52Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
, m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
{
- RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse);
if (jit->isFilled(node()))
gpr();
}
@@ -2995,12 +2763,12 @@ private:
// Gives you a strict Int52 (i.e. the payload is in the low 48 bits, high 16 bits are sign-extended).
class SpeculateStrictInt52Operand {
public:
- explicit SpeculateStrictInt52Operand(SpeculativeJIT* jit, Edge edge)
+ explicit SpeculateStrictInt52Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
, m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
{
- RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse);
if (jit->isFilled(node()))
gpr();
}
@@ -3043,35 +2811,35 @@ enum OppositeShiftTag { OppositeShift };
class SpeculateWhicheverInt52Operand {
public:
- explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge)
+ explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
, m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
, m_strict(jit->betterUseStrictInt52(edge))
{
- RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse);
if (jit->isFilled(node()))
gpr();
}
- explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, const SpeculateWhicheverInt52Operand& other)
+ explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, const SpeculateWhicheverInt52Operand& other, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
, m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
, m_strict(other.m_strict)
{
- RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse);
if (jit->isFilled(node()))
gpr();
}
- explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OppositeShiftTag, const SpeculateWhicheverInt52Operand& other)
+ explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OppositeShiftTag, const SpeculateWhicheverInt52Operand& other, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
, m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
, m_strict(!other.m_strict)
{
- RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse);
if (jit->isFilled(node()))
gpr();
}
@@ -3120,13 +2888,13 @@ private:
class SpeculateDoubleOperand {
public:
- explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge)
+ explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
, m_edge(edge)
, m_fprOrInvalid(InvalidFPRReg)
{
ASSERT(m_jit);
- RELEASE_ASSERT(isDouble(edge.useKind()));
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || isDouble(edge.useKind()));
if (jit->isFilled(node()))
fpr();
}
@@ -3226,7 +2994,7 @@ public:
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse);
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse);
if (jit->isFilled(node()))
gpr();
}
@@ -3269,13 +3037,13 @@ template<typename StructureLocationType>
void SpeculativeJIT::speculateStringObjectForStructure(Edge edge, StructureLocationType structureLocation)
{
Structure* stringObjectStructure =
- m_jit.globalObjectFor(m_currentNode->origin.semantic)->stringObjectStructure();
+ m_jit.globalObjectFor(m_currentNode->codeOrigin)->stringObjectStructure();
- if (!m_state.forNode(edge).m_structure.isSubsetOf(StructureSet(stringObjectStructure))) {
+ if (!m_state.forNode(edge).m_currentKnownStructure.isSubsetOf(StructureSet(stringObjectStructure))) {
speculationCheck(
NotStringObject, JSValueRegs(), 0,
- m_jit.branchStructurePtr(
- JITCompiler::NotEqual, structureLocation, stringObjectStructure));
+ m_jit.branchPtr(
+ JITCompiler::NotEqual, structureLocation, TrustedImmPtr(stringObjectStructure)));
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index 1f326cbd7..bc21f929f 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2011 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,16 +35,9 @@
#include "DFGOperations.h"
#include "DFGSlowPathGenerator.h"
#include "Debugger.h"
-#include "DirectArguments.h"
-#include "GetterSetter.h"
-#include "JSEnvironmentRecord.h"
-#include "JSLexicalEnvironment.h"
-#include "JSPropertyNameEnumerator.h"
+#include "JSActivation.h"
#include "ObjectPrototype.h"
-#include "JSCInlines.h"
-#include "SetupVarargsFrame.h"
-#include "TypeProfilerLog.h"
-#include "Watchdog.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -64,12 +57,11 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR,
if (edge->hasConstant()) {
tagGPR = allocate();
payloadGPR = allocate();
- JSValue value = edge->asJSValue();
- m_jit.move(Imm32(value.tag()), tagGPR);
- m_jit.move(Imm32(value.payload()), payloadGPR);
+ m_jit.move(Imm32(valueOfJSConstant(edge.node()).tag()), tagGPR);
+ m_jit.move(Imm32(valueOfJSConstant(edge.node()).payload()), payloadGPR);
m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant);
m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant);
- info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS);
+ info.fillJSValue(*m_stream, tagGPR, payloadGPR, isInt32Constant(edge.node()) ? DataFormatJSInt32 : DataFormatJS);
} else {
DataFormat spillFormat = info.spillFormat();
ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage);
@@ -114,7 +106,7 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR,
m_gprs.lock(gpr);
}
tagGPR = allocate();
- int32_t tag = JSValue::EmptyValueTag;
+ uint32_t tag = JSValue::EmptyValueTag;
DataFormat fillFormat = DataFormatJS;
switch (info.registerFormat()) {
case DataFormatInt32:
@@ -142,6 +134,20 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR,
}
case DataFormatJSDouble:
+ case DataFormatDouble: {
+ FPRReg oldFPR = info.fpr();
+ m_fprs.lock(oldFPR);
+ tagGPR = allocate();
+ payloadGPR = allocate();
+ boxDouble(oldFPR, tagGPR, payloadGPR);
+ m_fprs.unlock(oldFPR);
+ m_fprs.release(oldFPR);
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
+ info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS);
+ return true;
+ }
+
case DataFormatJS:
case DataFormatJSInt32:
case DataFormatJSCell:
@@ -154,7 +160,6 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR,
}
case DataFormatStorage:
- case DataFormatDouble:
// this type currently never occurs
RELEASE_ASSERT_NOT_REACHED();
@@ -164,28 +169,12 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR,
}
}
-void SpeculativeJIT::cachedGetById(
- CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR,
- unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
- // This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens
- // in the case of GetByIdFlush, which has a relatively expensive register allocation story already so we probably don't need to
- // trip over one move instruction.
- if (basePayloadGPR == resultTagGPR) {
- RELEASE_ASSERT(basePayloadGPR != resultPayloadGPR);
-
- if (baseTagGPROrNone == resultPayloadGPR) {
- m_jit.swap(basePayloadGPR, baseTagGPROrNone);
- baseTagGPROrNone = resultTagGPR;
- } else
- m_jit.move(basePayloadGPR, resultPayloadGPR);
- basePayloadGPR = resultPayloadGPR;
- }
-
JITGetByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, usedRegisters(),
+ m_jit.codeBlock(), codeOrigin, usedRegisters(), GPRInfo::callFrameRegister,
JSValueRegs(baseTagGPROrNone, basePayloadGPR),
- JSValueRegs(resultTagGPR, resultPayloadGPR), spillMode);
+ JSValueRegs(resultTagGPR, resultPayloadGPR), spillMode != NeedToSpill);
gen.generateFastPath(m_jit);
@@ -193,8 +182,8 @@ void SpeculativeJIT::cachedGetById(
if (slowPathTarget.isSet())
slowCases.append(slowPathTarget);
slowCases.append(gen.slowPathJump());
-
- std::unique_ptr<SlowPathGenerator> slowPath;
+
+ OwnPtr<SlowPathGenerator> slowPath;
if (baseTagGPROrNone == InvalidGPRReg) {
slowPath = slowPathCall(
slowCases, this, operationGetByIdOptimize,
@@ -207,17 +196,17 @@ void SpeculativeJIT::cachedGetById(
JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), baseTagGPROrNone,
basePayloadGPR, identifierUID(identifierNumber));
}
-
+
m_jit.addGetById(gen, slowPath.get());
- addSlowPathGenerator(WTF::move(slowPath));
+ addSlowPathGenerator(slowPath.release());
}
-void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
JITPutByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, usedRegisters(),
+ m_jit.codeBlock(), codeOrigin, usedRegisters(), GPRInfo::callFrameRegister,
JSValueRegs::payloadOnly(basePayloadGPR), JSValueRegs(valueTagGPR, valuePayloadGPR),
- scratchGPR, spillMode, m_jit.ecmaModeFor(codeOrigin), putKind);
+ scratchGPR, false, m_jit.ecmaModeFor(codeOrigin), putKind);
gen.generateFastPath(m_jit);
@@ -226,12 +215,12 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
slowCases.append(slowPathTarget);
slowCases.append(gen.slowPathJump());
- auto slowPath = slowPathCall(
+ OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueTagGPR,
valuePayloadGPR, basePayloadGPR, identifierUID(identifierNumber));
m_jit.addPutById(gen, slowPath.get());
- addSlowPathGenerator(WTF::move(slowPath));
+ addSlowPathGenerator(slowPath.release());
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
@@ -247,8 +236,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
JITCompiler::Jump notMasqueradesAsUndefined;
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
if (!isKnownCell(operand.node()))
- notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
-
+ notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
+
m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR);
notMasqueradesAsUndefined = m_jit.jump();
} else {
@@ -256,12 +245,10 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
GPRTemporary remoteGlobalObject(this);
if (!isKnownCell(operand.node()))
- notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
-
- JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
- JITCompiler::NonZero,
- JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
- JITCompiler::TrustedImm32(MasqueradesAsUndefined));
+ notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
+
+ m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR);
+ JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined));
m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR);
notMasqueradesAsUndefined = m_jit.jump();
@@ -269,8 +256,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
isMasqueradesAsUndefined.link(&m_jit);
GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
- m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
- m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultPayloadGPR);
+ m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)), localGlobalObjectGPR);
m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR);
}
@@ -281,7 +267,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
notCell.link(&m_jit);
// null or undefined?
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
- m_jit.or32(TrustedImm32(1), argTagGPR, resultPayloadGPR);
+ m_jit.move(argTagGPR, resultPayloadGPR);
+ m_jit.or32(TrustedImm32(1), resultPayloadGPR);
m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR);
done.link(&m_jit);
@@ -294,8 +281,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
if (taken == nextBlock()) {
invert = !invert;
@@ -315,25 +302,22 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branch
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
if (!isKnownCell(operand.node()))
- notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
-
+ notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
+
jump(invert ? taken : notTaken, ForceJump);
} else {
GPRTemporary localGlobalObject(this);
GPRTemporary remoteGlobalObject(this);
if (!isKnownCell(operand.node()))
- notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
-
- branchTest8(JITCompiler::Zero,
- JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
- JITCompiler::TrustedImm32(MasqueradesAsUndefined),
- invert ? taken : notTaken);
+ notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
+
+ m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR);
+ branchTest8(JITCompiler::Zero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), invert ? taken : notTaken);
GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
- m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
- m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultGPR);
+ m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)), localGlobalObjectGPR);
m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
}
@@ -344,7 +328,8 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branch
notCell.link(&m_jit);
// null or undefined?
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
- m_jit.or32(TrustedImm32(1), argTagGPR, resultGPR);
+ m_jit.move(argTagGPR, resultGPR);
+ m_jit.or32(TrustedImm32(1), resultGPR);
branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken);
}
@@ -376,8 +361,8 @@ bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool in
void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
@@ -401,7 +386,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode,
JITCompiler::JumpList slowPath;
if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
arg1.use();
@@ -492,7 +477,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler
JITCompiler::JumpList slowPath;
if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultPayloadGPR = result.gpr();
arg1.use();
@@ -517,9 +502,10 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler
m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);
if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
- addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
+ addSlowPathGenerator(adoptPtr(
+ new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>(
slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR,
- arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR));
+ arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR)));
}
booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
@@ -528,8 +514,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler
void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
@@ -622,186 +608,64 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::compileMiscStrictEq(Node* node)
-{
- JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
- JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
- GPRTemporary result(this);
-
- if (node->child1().useKind() == MiscUse)
- speculateMisc(node->child1(), op1.jsValueRegs());
- if (node->child2().useKind() == MiscUse)
- speculateMisc(node->child2(), op2.jsValueRegs());
-
- m_jit.move(TrustedImm32(0), result.gpr());
- JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR());
- m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr());
- notEqual.link(&m_jit);
- booleanResult(result.gpr(), node);
-}
-
void SpeculativeJIT::emitCall(Node* node)
{
- CallLinkInfo::CallType callType;
- bool isVarargs = false;
- bool isForwardVarargs = false;
- switch (node->op()) {
- case Call:
- callType = CallLinkInfo::Call;
- break;
- case Construct:
- callType = CallLinkInfo::Construct;
- break;
- case CallVarargs:
- callType = CallLinkInfo::CallVarargs;
- isVarargs = true;
- break;
- case ConstructVarargs:
- callType = CallLinkInfo::ConstructVarargs;
- isVarargs = true;
- break;
- case CallForwardVarargs:
- callType = CallLinkInfo::CallVarargs;
- isForwardVarargs = true;
- break;
- case ConstructForwardVarargs:
- callType = CallLinkInfo::ConstructVarargs;
- isForwardVarargs = true;
- break;
- default:
- DFG_CRASH(m_jit.graph(), node, "bad node type");
- break;
- }
+ if (node->op() != Call)
+ ASSERT(node->op() == Construct);
- Edge calleeEdge = m_jit.graph().child(node, 0);
-
- // Gotta load the arguments somehow. Varargs is trickier.
- if (isVarargs || isForwardVarargs) {
- CallVarargsData* data = node->callVarargsData();
+ // For constructors, the this argument is not passed but we have to make space
+ // for it.
+ int dummyThisArgument = node->op() == Call ? 0 : 1;
- GPRReg resultGPR;
- unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;
-
- if (isForwardVarargs) {
- flushRegisters();
- use(node->child2());
-
- GPRReg scratchGPR1;
- GPRReg scratchGPR2;
- GPRReg scratchGPR3;
-
- scratchGPR1 = JITCompiler::selectScratchGPR();
- scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
- scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);
-
- m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
- JITCompiler::JumpList slowCase;
- emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
- JITCompiler::Jump done = m_jit.jump();
- slowCase.link(&m_jit);
- callOperation(operationThrowStackOverflowForVarargs);
- m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
- done.link(&m_jit);
- resultGPR = scratchGPR2;
- } else {
- GPRReg argumentsPayloadGPR;
- GPRReg argumentsTagGPR;
- GPRReg scratchGPR1;
- GPRReg scratchGPR2;
- GPRReg scratchGPR3;
-
- auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
- if (reservedGPR != InvalidGPRReg)
- lock(reservedGPR);
- JSValueOperand arguments(this, node->child2());
- argumentsTagGPR = arguments.tagGPR();
- argumentsPayloadGPR = arguments.payloadGPR();
- if (reservedGPR != InvalidGPRReg)
- unlock(reservedGPR);
- flushRegisters();
-
- scratchGPR1 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, reservedGPR);
- scratchGPR2 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, reservedGPR);
- scratchGPR3 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, scratchGPR2, reservedGPR);
- };
-
- loadArgumentsGPR(InvalidGPRReg);
-
- DFG_ASSERT(m_jit.graph(), node, isFlushed());
-
- // Right now, arguments is in argumentsTagGPR/argumentsPayloadGPR and the register file is
- // flushed.
- callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, numUsedStackSlots, data->firstVarArgOffset);
-
- // Now we have the argument count of the callee frame, but we've lost the arguments operand.
- // Reconstruct the arguments operand while preserving the callee frame.
- loadArgumentsGPR(GPRInfo::returnValueGPR);
- m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
- emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
- m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);
-
- callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsTagGPR, argumentsPayloadGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR);
- resultGPR = GPRInfo::returnValueGPR;
- }
-
- m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister);
-
- DFG_ASSERT(m_jit.graph(), node, isFlushed());
-
- // We don't need the arguments array anymore.
- if (isVarargs)
- use(node->child2());
-
- // Now set up the "this" argument.
- JSValueOperand thisArgument(this, node->child3());
- GPRReg thisArgumentTagGPR = thisArgument.tagGPR();
- GPRReg thisArgumentPayloadGPR = thisArgument.payloadGPR();
- thisArgument.use();
-
- m_jit.store32(thisArgumentTagGPR, JITCompiler::calleeArgumentTagSlot(0));
- m_jit.store32(thisArgumentPayloadGPR, JITCompiler::calleeArgumentPayloadSlot(0));
- } else {
- // The call instruction's first child is either the function (normal call) or the
- // receiver (method call). subsequent children are the arguments.
- int numPassedArgs = node->numChildren() - 1;
-
- m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(JSStack::ArgumentCount));
-
- for (int i = 0; i < numPassedArgs; i++) {
- Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
- JSValueOperand arg(this, argEdge);
- GPRReg argTagGPR = arg.tagGPR();
- GPRReg argPayloadGPR = arg.payloadGPR();
- use(argEdge);
-
- m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i));
- m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i));
- }
- }
+ CallLinkInfo::CallType callType = node->op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
+ Edge calleeEdge = m_jit.graph().m_varArgChildren[node->firstChild()];
JSValueOperand callee(this, calleeEdge);
GPRReg calleeTagGPR = callee.tagGPR();
GPRReg calleePayloadGPR = callee.payloadGPR();
use(calleeEdge);
- m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(JSStack::Callee));
- m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(JSStack::Callee));
+
+ // The call instruction's first child is either the function (normal call) or the
+ // receiver (method call). subsequent children are the arguments.
+ int numPassedArgs = node->numChildren() - 1;
+
+ int numArgs = numPassedArgs + dummyThisArgument;
+
+ m_jit.store32(MacroAssembler::TrustedImm32(numArgs), calleeFramePayloadSlot(numArgs, JSStack::ArgumentCount));
+ m_jit.storePtr(GPRInfo::callFrameRegister, calleeFrameCallerFrame(numArgs));
+ m_jit.store32(calleePayloadGPR, calleeFramePayloadSlot(numArgs, JSStack::Callee));
+ m_jit.store32(calleeTagGPR, calleeFrameTagSlot(numArgs, JSStack::Callee));
+
+ for (int i = 0; i < numPassedArgs; i++) {
+ Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
+ JSValueOperand arg(this, argEdge);
+ GPRReg argTagGPR = arg.tagGPR();
+ GPRReg argPayloadGPR = arg.payloadGPR();
+ use(argEdge);
+
+ m_jit.store32(argTagGPR, calleeArgumentTagSlot(numArgs, i + dummyThisArgument));
+ m_jit.store32(argPayloadGPR, calleeArgumentPayloadSlot(numArgs, i + dummyThisArgument));
+ }
flushRegisters();
- GPRFlushedCallResult resultPayload(this);
- GPRFlushedCallResult2 resultTag(this);
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
GPRReg resultPayloadGPR = resultPayload.gpr();
GPRReg resultTagGPR = resultTag.gpr();
JITCompiler::DataLabelPtr targetToCheck;
JITCompiler::JumpList slowPath;
- m_jit.emitStoreCodeOrigin(node->origin.semantic);
+ m_jit.emitStoreCodeOrigin(node->codeOrigin);
- CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo();
-
- slowPath.append(m_jit.branchIfNotCell(callee.jsValueRegs()));
+ m_jit.addPtr(TrustedImm32(calleeFrameOffset(numArgs)), GPRInfo::callFrameRegister);
+
+ slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, calleeTagGPR, TrustedImm32(JSValue::CellTag)));
slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
+ m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultPayloadGPR);
+ m_jit.storePtr(resultPayloadGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
JITCompiler::Call fastCall = m_jit.nearCall();
@@ -821,7 +685,6 @@ void SpeculativeJIT::emitCall(Node* node)
m_jit.move(calleePayloadGPR, GPRInfo::regT0);
m_jit.move(calleeTagGPR, GPRInfo::regT1);
}
- m_jit.move(MacroAssembler::TrustedImmPtr(info), GPRInfo::regT2);
JITCompiler::Call slowCall = m_jit.nearCall();
done.link(&m_jit);
@@ -830,12 +693,7 @@ void SpeculativeJIT::emitCall(Node* node)
jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly);
- info->setUpCall(callType, node->origin.semantic, calleePayloadGPR);
- m_jit.addJSCall(fastCall, slowCall, targetToCheck, info);
-
- // If we were varargs, then after the calls are done, we need to reestablish our stack pointer.
- if (isVarargs || isForwardVarargs)
- m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
+ m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, calleePayloadGPR, node->codeOrigin);
}
template<bool strict>
@@ -844,23 +702,22 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF
AbstractValue& value = m_state.forNode(edge);
SpeculatedType type = value.m_type;
ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));
-
m_interpreter.filter(value, SpecInt32);
- if (value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
- returnFormat = DataFormatInt32;
- return allocate();
- }
-
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
+ if ((edge->hasConstant() && !isInt32Constant(edge.node())) || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ returnFormat = DataFormatInt32;
+ return allocate();
+ }
+
if (edge->hasConstant()) {
- ASSERT(edge->isInt32Constant());
+ ASSERT(isInt32Constant(edge.node()));
GPRReg gpr = allocate();
- m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
info.fillInt32(*m_stream, gpr);
returnFormat = DataFormatInt32;
@@ -868,7 +725,6 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF
}
DataFormat spillFormat = info.spillFormat();
-
ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);
// If we know this was spilled as an integer we can fill without checking.
@@ -909,12 +765,16 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF
return gpr;
}
+ case DataFormatDouble:
case DataFormatCell:
case DataFormatBoolean:
case DataFormatJSDouble:
case DataFormatJSCell:
case DataFormatJSBoolean:
- case DataFormatDouble:
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ returnFormat = DataFormatInt32;
+ return allocate();
+
case DataFormatStorage:
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -937,34 +797,136 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
{
- ASSERT(isDouble(edge.useKind()));
- ASSERT(edge->hasDoubleResult());
+ AbstractValue& value = m_state.forNode(edge);
+ SpeculatedType type = value.m_type;
+ ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecFullNumber));
+ m_interpreter.filter(value, SpecFullNumber);
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
if (info.registerFormat() == DataFormatNone) {
if (edge->hasConstant()) {
- RELEASE_ASSERT(edge->isNumberConstant());
+ if (isInt32Constant(edge.node())) {
+ GPRReg gpr = allocate();
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ info.fillInt32(*m_stream, gpr);
+ unlock(gpr);
+ } else if (isNumberConstant(edge.node())) {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(addressOfDoubleConstant(edge.node()), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
+ info.fillDouble(*m_stream, fpr);
+ return fpr;
+ } else {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return fprAllocate();
+ }
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);
+ if (spillFormat == DataFormatJSDouble || spillFormat == DataFormatDouble) {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+ info.fillDouble(*m_stream, fpr);
+ return fpr;
+ }
+
FPRReg fpr = fprAllocate();
- m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(edge.node())), fpr);
- m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
+ JITCompiler::Jump hasUnboxedDouble;
+
+ if (spillFormat != DataFormatJSInt32 && spillFormat != DataFormatInt32) {
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag));
+ if (type & ~SpecFullNumber)
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::AboveOrEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::LowestTag)));
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ hasUnboxedDouble = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ }
+
+ m_jit.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister), fpr);
+
+ if (hasUnboxedDouble.isSet())
+ hasUnboxedDouble.link(&m_jit);
+
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
info.fillDouble(*m_stream, fpr);
+ info.killSpilled();
return fpr;
}
-
- RELEASE_ASSERT(info.spillFormat() == DataFormatDouble);
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatJS:
+ case DataFormatJSInt32: {
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
FPRReg fpr = fprAllocate();
- m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
- m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+
+ JITCompiler::Jump hasUnboxedDouble;
+
+ if (info.registerFormat() != DataFormatJSInt32) {
+ FPRTemporary scratch(this);
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+ if (type & ~SpecFullNumber)
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
+ unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
+ hasUnboxedDouble = m_jit.jump();
+ isInteger.link(&m_jit);
+ }
+
+ m_jit.convertInt32ToDouble(payloadGPR, fpr);
+
+ if (hasUnboxedDouble.isSet())
+ hasUnboxedDouble.link(&m_jit);
+
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.unlock(tagGPR);
+ m_gprs.unlock(payloadGPR);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
info.fillDouble(*m_stream, fpr);
+ info.killSpilled();
return fpr;
}
- RELEASE_ASSERT(info.registerFormat() == DataFormatDouble);
- FPRReg fpr = info.fpr();
- m_fprs.lock(fpr);
- return fpr;
+ case DataFormatInt32: {
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt32ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ case DataFormatJSDouble:
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
+ }
+
+ case DataFormatNone:
+ case DataFormatStorage:
+ RELEASE_ASSERT_NOT_REACHED();
+
+ case DataFormatCell:
+ case DataFormatJSCell:
+ case DataFormatBoolean:
+ case DataFormatJSBoolean:
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return fprAllocate();
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return InvalidFPRReg;
+ }
}
GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
@@ -972,38 +934,33 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
AbstractValue& value = m_state.forNode(edge);
SpeculatedType type = value.m_type;
ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
-
m_interpreter.filter(value, SpecCell);
- if (value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
- return allocate();
- }
-
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
+ if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+ }
+
if (edge->hasConstant()) {
- JSValue jsValue = edge->asJSValue();
+ JSValue jsValue = valueOfJSConstant(edge.node());
GPRReg gpr = allocate();
- m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
- info.fillCell(*m_stream, gpr);
+ if (jsValue.isCell()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
+ info.fillCell(*m_stream, gpr);
+ return gpr;
+ }
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
return gpr;
}
ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell);
- if (type & ~SpecCell) {
- speculationCheck(
- BadType,
- JSValueSource(JITCompiler::addressFor(virtualRegister)),
- edge,
- m_jit.branch32(
- MacroAssembler::NotEqual,
- JITCompiler::tagFor(virtualRegister),
- TrustedImm32(JSValue::CellTag)));
- }
+ if (type & ~SpecCell)
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
GPRReg gpr = allocate();
m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
@@ -1023,11 +980,8 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
GPRReg payloadGPR = info.payloadGPR();
m_gprs.lock(tagGPR);
m_gprs.lock(payloadGPR);
- if (type & ~SpecCell) {
- speculationCheck(
- BadType, JSValueRegs(tagGPR, payloadGPR), edge,
- m_jit.branchIfNotCell(info.jsValueRegs()));
- }
+ if (type & ~SpecCell)
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag)));
m_gprs.unlock(tagGPR);
m_gprs.release(tagGPR);
m_gprs.release(payloadGPR);
@@ -1039,9 +993,12 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
case DataFormatJSInt32:
case DataFormatInt32:
case DataFormatJSDouble:
+ case DataFormatDouble:
case DataFormatJSBoolean:
case DataFormatBoolean:
- case DataFormatDouble:
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+
case DataFormatStorage:
RELEASE_ASSERT_NOT_REACHED();
@@ -1055,25 +1012,27 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
{
AbstractValue& value = m_state.forNode(edge);
SpeculatedType type = value.m_type;
- ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean));
-
m_interpreter.filter(value, SpecBoolean);
- if (value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
- return allocate();
- }
-
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
+ if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+ }
+
if (edge->hasConstant()) {
- JSValue jsValue = edge->asJSValue();
+ JSValue jsValue = valueOfJSConstant(edge.node());
GPRReg gpr = allocate();
- m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr);
- info.fillBoolean(*m_stream, gpr);
+ if (jsValue.isBoolean()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr);
+ info.fillBoolean(*m_stream, gpr);
+ return gpr;
+ }
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
return gpr;
}
@@ -1115,9 +1074,12 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
case DataFormatJSInt32:
case DataFormatInt32:
case DataFormatJSDouble:
+ case DataFormatDouble:
case DataFormatJSCell:
case DataFormatCell:
- case DataFormatDouble:
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+
case DataFormatStorage:
RELEASE_ASSERT_NOT_REACHED();
@@ -1127,6 +1089,28 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
}
}
+JITCompiler::Jump SpeculativeJIT::convertToDouble(JSValueOperand& op, FPRReg result)
+{
+ FPRTemporary scratch(this);
+
+ GPRReg opPayloadGPR = op.payloadGPR();
+ GPRReg opTagGPR = op.tagGPR();
+ FPRReg scratchFPR = scratch.fpr();
+
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, opTagGPR, TrustedImm32(JSValue::Int32Tag));
+ JITCompiler::Jump notNumber = m_jit.branch32(MacroAssembler::AboveOrEqual, opPayloadGPR, TrustedImm32(JSValue::LowestTag));
+
+ unboxDouble(opTagGPR, opPayloadGPR, result, scratchFPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(opPayloadGPR, result);
+
+ done.link(&m_jit);
+
+ return notNumber;
+}
+
void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge)
{
#if ENABLE(GGC)
@@ -1153,24 +1137,41 @@ void SpeculativeJIT::compileObjectEquality(Node* node)
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
+ JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ GPRTemporary structure(this);
+ GPRReg structureGPR = structure.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
- speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+ m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
- speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
m_jit.branchTest8(
- MacroAssembler::NonZero,
- MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
@@ -1187,57 +1188,6 @@ void SpeculativeJIT::compileObjectEquality(Node* node)
booleanResult(resultPayloadGPR, node);
}
-void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
-{
- SpeculateCellOperand op1(this, objectChild);
- JSValueOperand op2(this, otherChild);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg op2GPR = op2.payloadGPR();
-
- DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
-
- GPRTemporary resultPayload(this, Reuse, op1);
- GPRReg resultPayloadGPR = resultPayload.gpr();
-
- MacroAssembler::Jump op2CellJump = m_jit.branchIfCell(op2.jsValueRegs());
-
- m_jit.move(TrustedImm32(0), resultPayloadGPR);
- MacroAssembler::Jump op2NotCellJump = m_jit.jump();
-
- // At this point we know that we can perform a straight-forward equality comparison on pointer
- // values because we are doing strict equality.
- op2CellJump.link(&m_jit);
- m_jit.compare32(MacroAssembler::Equal, op1GPR, op2GPR, resultPayloadGPR);
-
- op2NotCellJump.link(&m_jit);
- booleanResult(resultPayloadGPR, m_currentNode);
-}
-
-void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
-{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
-
- SpeculateCellOperand op1(this, objectChild);
- JSValueOperand op2(this, otherChild);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg op2GPR = op2.payloadGPR();
-
- DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
-
- branch32(MacroAssembler::NotEqual, op2.tagGPR(), TrustedImm32(JSValue::CellTag), notTaken);
-
- if (taken == nextBlock()) {
- branch32(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
- jump(taken);
- } else {
- branch32(MacroAssembler::Equal, op1GPR, op2GPR, taken);
- jump(notTaken);
- }
-}
-
void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
{
SpeculateCellOperand op1(this, leftChild);
@@ -1248,39 +1198,66 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
GPRReg op2TagGPR = op2.tagGPR();
GPRReg op2PayloadGPR = op2.payloadGPR();
GPRReg resultGPR = result.gpr();
+ GPRTemporary structure;
+ GPRReg structureGPR = InvalidGPRReg;
bool masqueradesAsUndefinedWatchpointValid =
masqueradesAsUndefinedWatchpointIsStillValid();
+ if (!masqueradesAsUndefinedWatchpointValid) {
+ // The masquerades as undefined case will use the structure register, so allocate it here.
+ // Do this at the top of the function to avoid branching around a register allocation.
+ GPRTemporary realStructure(this);
+ structure.adopt(realStructure);
+ structureGPR = structure.gpr();
+ }
+
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
- MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs());
+ MacroAssembler::Jump rightNotCell =
+ m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag));
// We know that within this branch, rightChild must be a cell.
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR));
+ JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR));
+ JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild,
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
@@ -1295,7 +1272,8 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
// We know that within this branch, rightChild must not be a cell. Check if that is enough to
// prove that it is either null or undefined.
if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
- m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR);
+ m_jit.move(op2TagGPR, resultGPR);
+ m_jit.or32(TrustedImm32(1), resultGPR);
typeCheck(
JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther,
@@ -1316,8 +1294,8 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
SpeculateCellOperand op1(this, leftChild);
JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
@@ -1327,40 +1305,65 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
GPRReg op2TagGPR = op2.tagGPR();
GPRReg op2PayloadGPR = op2.payloadGPR();
GPRReg resultGPR = result.gpr();
+ GPRTemporary structure;
+ GPRReg structureGPR = InvalidGPRReg;
bool masqueradesAsUndefinedWatchpointValid =
masqueradesAsUndefinedWatchpointIsStillValid();
+ if (!masqueradesAsUndefinedWatchpointValid) {
+ // The masquerades as undefined case will use the structure register, so allocate it here.
+ // Do this at the top of the function to avoid branching around a register allocation.
+ GPRTemporary realStructure(this);
+ structure.adopt(realStructure);
+ structureGPR = structure.gpr();
+ }
+
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
- MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs());
+ MacroAssembler::Jump rightNotCell =
+ m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag));
// We know that within this branch, rightChild must be a cell.
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
- m_jit.branchIfNotObject(op2PayloadGPR));
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
- m_jit.branchIfNotObject(op2PayloadGPR));
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild,
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
@@ -1377,7 +1380,8 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
jump(notTaken, ForceJump);
rightNotCell.link(&m_jit);
- m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR);
+ m_jit.move(op2TagGPR, resultGPR);
+ m_jit.or32(TrustedImm32(1), resultGPR);
typeCheck(
JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther,
@@ -1436,28 +1440,35 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
structureGPR = structure.gpr();
}
- MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs());
+ MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag));
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
- m_jit.branchIfNotObject(valuePayloadGPR));
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), structureGPR);
+
DFG_TYPE_CHECK(
JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
- m_jit.branchIfNotObject(valuePayloadGPR));
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
MacroAssembler::Jump isNotMasqueradesAsUndefined =
m_jit.branchTest8(
MacroAssembler::Zero,
- MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
- m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), structureGPR);
speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse,
m_jit.branchPtr(
MacroAssembler::Equal,
MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
+ MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
isNotMasqueradesAsUndefined.link(&m_jit);
}
@@ -1468,7 +1479,8 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
- m_jit.or32(TrustedImm32(1), valueTagGPR, resultPayloadGPR);
+ m_jit.move(valueTagGPR, resultPayloadGPR);
+ m_jit.or32(TrustedImm32(1), resultPayloadGPR);
typeCheck(
JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther,
m_jit.branch32(
@@ -1486,8 +1498,7 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
void SpeculativeJIT::compileLogicalNot(Node* node)
{
switch (node->child1().useKind()) {
- case BooleanUse:
- case KnownBooleanUse: {
+ case BooleanUse: {
SpeculateBooleanOperand value(this, node->child1());
GPRTemporary result(this, Reuse, value);
m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr());
@@ -1508,7 +1519,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
return;
}
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand value(this, node->child1());
FPRTemporary scratch(this);
GPRTemporary resultPayload(this);
@@ -1559,27 +1570,31 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba
GPRReg valuePayloadGPR = value.payloadGPR();
GPRReg scratchGPR = scratch.gpr();
- MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs());
+ MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag));
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
DFG_TYPE_CHECK(
JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
- m_jit.branchIfNotObject(valuePayloadGPR));
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), scratchGPR);
+
DFG_TYPE_CHECK(
JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
- m_jit.branchIfNotObject(valuePayloadGPR));
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ scratchGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
- JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
- JITCompiler::Zero,
- MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()),
- TrustedImm32(MasqueradesAsUndefined));
+ JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::Zero, MacroAssembler::Address(scratchGPR, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
- m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), scratchGPR);
speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse,
m_jit.branchPtr(
MacroAssembler::Equal,
MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
+ MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
isNotMasqueradesAsUndefined.link(&m_jit);
}
@@ -1589,7 +1604,8 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
- m_jit.or32(TrustedImm32(1), valueTagGPR, scratchGPR);
+ m_jit.move(valueTagGPR, scratchGPR);
+ m_jit.or32(TrustedImm32(1), scratchGPR);
typeCheck(
JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther,
m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
@@ -1602,12 +1618,11 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba
void SpeculativeJIT::emitBranch(Node* node)
{
- BasicBlock* taken = node->branchData()->taken.block;
- BasicBlock* notTaken = node->branchData()->notTaken.block;
+ BasicBlock* taken = node->takenBlock();
+ BasicBlock* notTaken = node->notTakenBlock();
switch (node->child1().useKind()) {
- case BooleanUse:
- case KnownBooleanUse: {
+ case BooleanUse: {
SpeculateBooleanOperand value(this, node->child1());
MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
@@ -1629,13 +1644,8 @@ void SpeculativeJIT::emitBranch(Node* node)
emitObjectOrOtherBranch(node->child1(), taken, notTaken);
return;
}
-
- case StringUse: {
- emitStringBranch(node->child1(), taken, notTaken);
- return;
- }
-
- case DoubleRepUse:
+
+ case NumberUse:
case Int32Use: {
if (node->child1().useKind() == Int32Use) {
bool invert = false;
@@ -1776,43 +1786,39 @@ void SpeculativeJIT::compile(Node* node)
switch (op) {
case JSConstant:
- case DoubleConstant:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
+ initConstantInfo(node);
+ break;
+
+ case PhantomArguments:
+ initConstantInfo(node);
+ break;
+
+ case WeakJSConstant:
+ m_jit.addWeakReference(node->weakConstant());
initConstantInfo(node);
break;
case Identity: {
- speculate(node, node->child1());
- switch (node->child1().useKind()) {
- case DoubleRepUse:
- case DoubleRepRealUse: {
- SpeculateDoubleOperand op(this, node->child1());
- doubleResult(op.fpr(), node);
- break;
- }
- case Int52RepUse:
- case MachineIntUse:
- case DoubleRepMachineIntUse: {
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- default: {
- JSValueOperand op(this, node->child1());
- jsValueResult(op.tagGPR(), op.payloadGPR(), node);
- break;
- }
- } // switch
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
case GetLocal: {
+ SpeculatedType prediction = node->variableAccessData()->prediction();
AbstractValue& value = m_state.variables().operand(node->local());
+ // If we have no prediction for this local, then don't attempt to compile.
+ if (prediction == SpecNone) {
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
+ break;
+ }
+
// If the CFA is tracking this variable and it found that the variable
// cannot have been assigned, then don't attempt to proceed.
if (value.isClear()) {
- m_compileOkay = false;
+ // FIXME: We should trap instead.
+ // https://bugs.webkit.org/show_bug.cgi?id=110383
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
break;
}
@@ -1862,7 +1868,8 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case FlushedJSValue: {
+ case FlushedJSValue:
+ case FlushedArguments: {
GPRTemporary result(this);
GPRTemporary tag(this);
m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
@@ -1893,18 +1900,13 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case MovHint: {
- compileMovHint(m_currentNode);
- noResult(node);
- break;
- }
-
- case ZombieHint: {
- recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
- noResult(node);
+ case MovHint:
+ case ZombieHint:
+ case Check: {
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
-
+
case SetLocal: {
switch (node->variableAccessData()->flushFormat()) {
case FlushedDouble: {
@@ -1943,7 +1945,16 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case FlushedJSValue: {
+ case FlushedJSValue:
+ case FlushedArguments: {
+ if (generationInfoFromVirtualRegister(node->child1()->virtualRegister()).registerFormat() == DataFormatDouble) {
+ SpeculateDoubleOperand value(this, node->child1(), ManualOperandSpeculation);
+ m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal()));
+ noResult(node);
+ recordSetLocal(DataFormatDouble);
+ break;
+ }
+
JSValueOperand value(this, node->child1());
m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node->machineLocal()));
m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node->machineLocal()));
@@ -1964,24 +1975,23 @@ void SpeculativeJIT::compile(Node* node)
// But it may be profitable to use this as a hook to run speculation checks
// on arguments, thereby allowing us to trivially eliminate such checks if
// the argument is not used.
- recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
break;
case BitAnd:
case BitOr:
case BitXor:
- if (node->child1()->isInt32Constant()) {
+ if (isInt32Constant(node->child1().node())) {
SpeculateInt32Operand op2(this, node->child2());
GPRTemporary result(this, Reuse, op2);
- bitOp(op, node->child1()->asInt32(), op2.gpr(), result.gpr());
+ bitOp(op, valueOfInt32Constant(node->child1().node()), op2.gpr(), result.gpr());
int32Result(result.gpr(), node);
- } else if (node->child2()->isInt32Constant()) {
+ } else if (isInt32Constant(node->child2().node())) {
SpeculateInt32Operand op1(this, node->child1());
GPRTemporary result(this, Reuse, op1);
- bitOp(op, node->child2()->asInt32(), op1.gpr(), result.gpr());
+ bitOp(op, valueOfInt32Constant(node->child2().node()), op1.gpr(), result.gpr());
int32Result(result.gpr(), node);
} else {
@@ -2000,11 +2010,11 @@ void SpeculativeJIT::compile(Node* node)
case BitRShift:
case BitLShift:
case BitURShift:
- if (node->child2()->isInt32Constant()) {
+ if (isInt32Constant(node->child2().node())) {
SpeculateInt32Operand op1(this, node->child1());
GPRTemporary result(this, Reuse, op1);
- shiftOp(op, op1.gpr(), node->child2()->asInt32() & 0x1f, result.gpr());
+ shiftOp(op, op1.gpr(), valueOfInt32Constant(node->child2().node()) & 0x1f, result.gpr());
int32Result(result.gpr(), node);
} else {
@@ -2036,13 +2046,8 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case DoubleRep: {
- compileDoubleRep(node);
- break;
- }
-
- case ValueRep: {
- compileValueRep(node);
+ case Int32ToDouble: {
+ compileInt32ToDouble(node);
break;
}
@@ -2057,8 +2062,8 @@ void SpeculativeJIT::compile(Node* node)
flushRegisters();
- GPRFlushedCallResult2 resultTag(this);
- GPRFlushedCallResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ GPRResult resultPayload(this);
if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node()))
callOperation(operationValueAddNotNumber, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
else
@@ -2072,10 +2077,6 @@ void SpeculativeJIT::compile(Node* node)
compileAdd(node);
break;
- case ArithClz32:
- compileArithClz32(node);
- break;
-
case MakeRope:
compileMakeRope(node);
break;
@@ -2102,11 +2103,6 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case ArithPow: {
- compileArithPow(node);
- break;
- }
-
case ArithAbs: {
switch (node->child1().useKind()) {
case Int32Use: {
@@ -2124,7 +2120,7 @@ void SpeculativeJIT::compile(Node* node)
}
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
FPRTemporary result(this);
@@ -2166,7 +2162,7 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
SpeculateDoubleOperand op2(this, node->child2());
FPRTemporary result(this, op1);
@@ -2210,26 +2206,17 @@ void SpeculativeJIT::compile(Node* node)
}
break;
}
-
- case ArithSqrt:
- compileArithSqrt(node);
- break;
-
- case ArithFRound: {
+
+ case ArithSqrt: {
SpeculateDoubleOperand op1(this, node->child1());
FPRTemporary result(this, op1);
- m_jit.convertDoubleToFloat(op1.fpr(), result.fpr());
- m_jit.convertFloatToDouble(result.fpr(), result.fpr());
+ m_jit.sqrtDouble(op1.fpr(), result.fpr());
doubleResult(result.fpr(), node);
break;
}
- case ArithRound:
- compileArithRound(node);
- break;
-
case ArithSin: {
SpeculateDoubleOperand op1(this, node->child1());
FPRReg op1FPR = op1.fpr();
@@ -2254,10 +2241,6 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case ArithLog:
- compileArithLog(node);
- break;
-
case LogicalNot:
compileLogicalNot(node);
break;
@@ -2283,7 +2266,7 @@ void SpeculativeJIT::compile(Node* node)
break;
case CompareEqConstant:
- ASSERT(node->child2()->asJSValue().isNull());
+ ASSERT(isNullConstant(node->child2().node()));
if (nonSpeculativeCompareNull(node, node->child1()))
return;
break;
@@ -2293,6 +2276,11 @@ void SpeculativeJIT::compile(Node* node)
return;
break;
+ case CompareStrictEqConstant:
+ if (compileStrictEqForConstant(node, node->child1(), valueOfJSConstant(node->child2().node())))
+ return;
+ break;
+
case CompareStrictEq:
if (compileStrictEq(node))
return;
@@ -2330,30 +2318,8 @@ void SpeculativeJIT::compile(Node* node)
case Array::SelectUsingPredictions:
case Array::ForceExit:
RELEASE_ASSERT_NOT_REACHED();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
-#endif
- break;
- case Array::Undecided: {
- SpeculateStrictInt32Operand index(this, node->child2());
- GPRTemporary resultTag(this, Reuse, index);
- GPRTemporary resultPayload(this);
-
- GPRReg indexGPR = index.gpr();
- GPRReg resultTagGPR = resultTag.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
-
- use(node->child1());
- index.use();
-
- speculationCheck(OutOfBounds, JSValueRegs(), node,
- m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0)));
-
- m_jit.move(MacroAssembler::TrustedImm32(JSValue::UndefinedTag), resultTagGPR);
- m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR);
- jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
break;
- }
case Array::Generic: {
SpeculateCellOperand base(this, node->child1()); // Save a register, speculate cell. We'll probably be right.
JSValueOperand property(this, node->child2());
@@ -2362,8 +2328,8 @@ void SpeculativeJIT::compile(Node* node)
GPRReg propertyPayloadGPR = property.payloadGPR();
flushRegisters();
- GPRFlushedCallResult2 resultTag(this);
- GPRFlushedCallResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ GPRResult resultPayload(this);
callOperation(operationGetByValCell, resultTag.gpr(), resultPayload.gpr(), baseGPR, propertyTagGPR, propertyPayloadGPR);
jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
@@ -2385,46 +2351,21 @@ void SpeculativeJIT::compile(Node* node)
GPRTemporary resultPayload(this);
if (node->arrayMode().type() == Array::Int32) {
- ASSERT(!node->arrayMode().isSaneChain());
-
speculationCheck(
OutOfBounds, JSValueRegs(), 0,
m_jit.branch32(
MacroAssembler::Equal,
- MacroAssembler::BaseIndex(
- storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset),
+ MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
TrustedImm32(JSValue::EmptyValueTag)));
- m_jit.load32(
- MacroAssembler::BaseIndex(
- storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset),
- resultPayload.gpr());
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr());
int32Result(resultPayload.gpr(), node);
break;
}
GPRTemporary resultTag(this);
- m_jit.load32(
- MacroAssembler::BaseIndex(
- storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset),
- resultTag.gpr());
- m_jit.load32(
- MacroAssembler::BaseIndex(
- storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset),
- resultPayload.gpr());
- if (node->arrayMode().isSaneChain()) {
- JITCompiler::Jump notHole = m_jit.branch32(
- MacroAssembler::NotEqual, resultTag.gpr(),
- TrustedImm32(JSValue::EmptyValueTag));
- m_jit.move(TrustedImm32(JSValue::UndefinedTag), resultTag.gpr());
- m_jit.move(TrustedImm32(0), resultPayload.gpr());
- notHole.link(&m_jit);
- } else {
- speculationCheck(
- LoadFromHole, JSValueRegs(), 0,
- m_jit.branch32(
- MacroAssembler::Equal, resultTag.gpr(),
- TrustedImm32(JSValue::EmptyValueTag)));
- }
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr());
+ speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)));
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr());
jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
break;
}
@@ -2579,11 +2520,8 @@ void SpeculativeJIT::compile(Node* node)
case Array::String:
compileGetByValOnString(node);
break;
- case Array::DirectArguments:
- compileGetByValOnDirectArguments(node);
- break;
- case Array::ScopedArguments:
- compileGetByValOnScopedArguments(node);
+ case Array::Arguments:
+ compileGetByValOnArguments(node);
break;
default: {
TypedArrayType type = node->arrayMode().typedArrayType();
@@ -2610,10 +2548,8 @@ void SpeculativeJIT::compile(Node* node)
case Array::SelectUsingPredictions:
case Array::ForceExit:
RELEASE_ASSERT_NOT_REACHED();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
alreadyHandled = true;
-#endif
break;
case Array::Generic: {
ASSERT(node->op() == PutByVal || node->op() == PutByValDirect);
@@ -2762,6 +2698,12 @@ void SpeculativeJIT::compile(Node* node)
break;
}
+ case Array::Arguments:
+ // FIXME: we could at some point make this work. Right now we're assuming that the register
+ // pressure would be too great.
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+
default: {
TypedArrayType type = arrayMode.typedArrayType();
if (isInt(type))
@@ -2783,7 +2725,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
// Must use jsValueResult because otherwise we screw up register
@@ -2798,8 +2740,8 @@ void SpeculativeJIT::compile(Node* node)
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRFlushedCallResult2 resultTag(this);
- GPRFlushedCallResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ GPRResult resultPayload(this);
callOperation(operationRegExpExec, resultTag.gpr(), resultPayload.gpr(), baseGPR, argumentGPR);
jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
@@ -2813,7 +2755,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
// If we add a DataFormatBool, we should use it here.
@@ -2884,7 +2826,7 @@ void SpeculativeJIT::compile(Node* node)
FPRReg valueFPR = value.fpr();
DFG_TYPE_CHECK(
- JSValueRegs(), node->child2(), SpecDoubleReal,
+ JSValueRegs(), node->child2(), SpecFullRealNumber,
m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR));
m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
@@ -3000,7 +2942,7 @@ void SpeculativeJIT::compile(Node* node)
MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight),
tempFPR);
MacroAssembler::Jump slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR);
- JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN);
+ JSValue nan = JSValue(JSValue::EncodeAsDouble, QNaN);
m_jit.store32(
MacroAssembler::TrustedImm32(nan.u.asBits.tag),
MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
@@ -3070,7 +3012,7 @@ void SpeculativeJIT::compile(Node* node)
}
case DFG::Jump: {
- jump(node->targetBlock());
+ jump(node->takenBlock());
noResult(node);
break;
}
@@ -3105,7 +3047,12 @@ void SpeculativeJIT::compile(Node* node)
}
}
- m_jit.emitFunctionEpilogue();
+ // Grab the return address.
+ m_jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::regT2);
+ // Restore our caller's "r".
+ m_jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister);
+ // Return.
+ m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT2);
m_jit.ret();
noResult(node);
@@ -3120,60 +3067,6 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case BooleanToNumber: {
- switch (node->child1().useKind()) {
- case BooleanUse: {
- SpeculateBooleanOperand value(this, node->child1());
- GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
-
- m_jit.move(value.gpr(), result.gpr());
-
- int32Result(result.gpr(), node);
- break;
- }
-
- case UntypedUse: {
- JSValueOperand value(this, node->child1());
-
- if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) {
- GPRTemporary result(this);
-
- GPRReg valueGPR = value.payloadGPR();
- GPRReg resultGPR = result.gpr();
-
- m_jit.move(valueGPR, resultGPR);
- int32Result(result.gpr(), node);
- break;
- }
-
- GPRTemporary resultTag(this);
- GPRTemporary resultPayload(this);
-
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRReg resultTagGPR = resultTag.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
-
- m_jit.move(valuePayloadGPR, resultPayloadGPR);
- JITCompiler::Jump isBoolean = m_jit.branch32(
- JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::BooleanTag));
- m_jit.move(valueTagGPR, resultTagGPR);
- JITCompiler::Jump done = m_jit.jump();
- isBoolean.link(&m_jit);
- m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
- done.link(&m_jit);
-
- jsValueResult(resultTagGPR, resultPayloadGPR, node);
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- break;
- }
-
case ToPrimitive: {
RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
JSValueOperand op1(this, node->child1());
@@ -3191,8 +3084,8 @@ void SpeculativeJIT::compile(Node* node)
m_jit.move(op1TagGPR, resultTagGPR);
m_jit.move(op1PayloadGPR, resultPayloadGPR);
} else {
- MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(op1.jsValueRegs());
- MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(op1PayloadGPR);
+ MacroAssembler::Jump alreadyPrimitive = m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag));
+ MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1PayloadGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()));
alreadyPrimitive.link(&m_jit);
m_jit.move(op1TagGPR, resultTagGPR);
@@ -3208,40 +3101,38 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case ToString:
- case CallStringConstructor: {
+ case ToString: {
if (node->child1().useKind() == UntypedUse) {
JSValueOperand op1(this, node->child1());
GPRReg op1PayloadGPR = op1.payloadGPR();
GPRReg op1TagGPR = op1.tagGPR();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
flushRegisters();
JITCompiler::Jump done;
if (node->child1()->prediction() & SpecString) {
- JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(op1.jsValueRegs());
- JITCompiler::Jump slowPath2 = m_jit.branchIfNotString(op1PayloadGPR);
+ JITCompiler::Jump slowPath1 = m_jit.branch32(
+ JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag));
+ JITCompiler::Jump slowPath2 = m_jit.branchPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(op1PayloadGPR, JSCell::structureOffset()),
+ TrustedImmPtr(m_jit.vm()->stringStructure.get()));
m_jit.move(op1PayloadGPR, resultGPR);
done = m_jit.jump();
slowPath1.link(&m_jit);
slowPath2.link(&m_jit);
}
- if (op == ToString)
- callOperation(operationToString, resultGPR, op1TagGPR, op1PayloadGPR);
- else {
- ASSERT(op == CallStringConstructor);
- callOperation(operationCallStringConstructor, resultGPR, op1TagGPR, op1PayloadGPR);
- }
+ callOperation(operationToString, resultGPR, op1TagGPR, op1PayloadGPR);
if (done.isSet())
done.link(&m_jit);
cellResult(resultGPR, node);
break;
}
- compileToStringOrCallStringConstructorOnCell(node);
+ compileToStringOnCell(node);
break;
}
@@ -3251,8 +3142,8 @@ void SpeculativeJIT::compile(Node* node)
}
case NewArray: {
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
- if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
+ if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) {
Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
ASSERT(structure->indexingType() == node->indexingType());
ASSERT(
@@ -3287,7 +3178,7 @@ void SpeculativeJIT::compile(Node* node)
SpeculateDoubleOperand operand(this, use);
FPRReg opFPR = operand.fpr();
DFG_TYPE_CHECK(
- JSValueRegs(), use, SpecDoubleReal,
+ JSValueRegs(), use, SpecFullRealNumber,
m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx));
@@ -3326,7 +3217,7 @@ void SpeculativeJIT::compile(Node* node)
if (!node->numChildren()) {
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(
operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
cellResult(result.gpr(), node);
@@ -3355,7 +3246,7 @@ void SpeculativeJIT::compile(Node* node)
JSValueRegs(), use, SpecFullRealNumber,
m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
- m_jit.storeDouble(opFPR, TrustedImmPtr(reinterpret_cast<char*>(buffer + operandIdx)));
+ m_jit.storeDouble(opFPR, reinterpret_cast<char*>(buffer + operandIdx));
break;
}
case ALL_INT32_INDEXING_TYPES: {
@@ -3401,7 +3292,7 @@ void SpeculativeJIT::compile(Node* node)
m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr());
}
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(
operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
@@ -3419,8 +3310,8 @@ void SpeculativeJIT::compile(Node* node)
}
case NewArrayWithSize: {
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
- if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
+ if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) {
SpeculateStrictInt32Operand size(this, node->child1());
GPRTemporary result(this);
GPRTemporary storage(this);
@@ -3434,7 +3325,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg scratch2GPR = scratch2.gpr();
MacroAssembler::JumpList slowCases;
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)));
ASSERT((1 << 3) == sizeof(JSValue));
m_jit.move(sizeGPR, scratchGPR);
@@ -3450,7 +3341,7 @@ void SpeculativeJIT::compile(Node* node)
m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
if (hasDouble(node->indexingType())) {
- JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN);
+ JSValue nan = JSValue(JSValue::EncodeAsDouble, QNaN);
m_jit.move(sizeGPR, scratchGPR);
MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR);
@@ -3462,11 +3353,12 @@ void SpeculativeJIT::compile(Node* node)
done.link(&m_jit);
}
- addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
+ addSlowPathGenerator(adoptPtr(
+ new CallArrayAllocatorWithVariableSizeSlowPathGenerator(
slowCases, this, operationNewArrayWithSize, resultGPR,
globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage),
- sizeGPR));
+ sizeGPR)));
cellResult(resultGPR, node);
break;
@@ -3475,10 +3367,10 @@ void SpeculativeJIT::compile(Node* node)
SpeculateStrictInt32Operand size(this, node->child1());
GPRReg sizeGPR = size.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
GPRReg structureGPR = selectScratchGPR(sizeGPR);
- MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
+ MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX));
m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR);
MacroAssembler::Jump done = m_jit.jump();
bigLength.link(&m_jit);
@@ -3491,9 +3383,9 @@ void SpeculativeJIT::compile(Node* node)
}
case NewArrayBuffer: {
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
IndexingType indexingType = node->indexingType();
- if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingType)) {
+ if (!globalObject->isHavingABadTime() && !hasArrayStorage(indexingType)) {
unsigned numElements = node->numConstants();
GPRTemporary result(this);
@@ -3528,7 +3420,7 @@ void SpeculativeJIT::compile(Node* node)
}
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants());
@@ -3548,10 +3440,10 @@ void SpeculativeJIT::compile(Node* node)
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
callOperation(
operationNewTypedArrayWithOneArgumentForType(node->typedArrayType()),
resultGPR, globalObject->typedArrayStructure(node->typedArrayType()),
@@ -3569,8 +3461,8 @@ void SpeculativeJIT::compile(Node* node)
case NewRegexp: {
flushRegisters();
- GPRFlushedCallResult resultPayload(this);
- GPRFlushedCallResult2 resultTag(this);
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
callOperation(operationNewRegexp, resultTag.gpr(), resultPayload.gpr(), m_jit.codeBlock()->regexp(node->regexpIndex()));
@@ -3590,15 +3482,18 @@ void SpeculativeJIT::compile(Node* node)
GPRReg tempTagGPR = tempTag.gpr();
MacroAssembler::JumpList slowCases;
- slowCases.append(m_jit.branchIfNotCell(thisValue.jsValueRegs()));
+ slowCases.append(m_jit.branch32(
+ MacroAssembler::NotEqual, thisValueTagGPR, TrustedImm32(JSValue::CellTag)));
+ m_jit.loadPtr(
+ MacroAssembler::Address(thisValuePayloadGPR, JSCell::structureOffset()), tempGPR);
slowCases.append(m_jit.branch8(
MacroAssembler::NotEqual,
- MacroAssembler::Address(thisValuePayloadGPR, JSCell::typeInfoTypeOffset()),
+ MacroAssembler::Address(tempGPR, Structure::typeInfoTypeOffset()),
TrustedImm32(FinalObjectType)));
m_jit.move(thisValuePayloadGPR, tempGPR);
m_jit.move(thisValueTagGPR, tempTagGPR);
J_JITOperation_EJ function;
- if (m_jit.graph().executableFor(node->origin.semantic)->isStrictMode())
+ if (m_jit.graph().executableFor(node->codeOrigin)->isStrictMode())
function = operationToThisStrict;
else
function = operationToThis;
@@ -3629,16 +3524,11 @@ void SpeculativeJIT::compile(Node* node)
GPRReg allocatorGPR = allocator.gpr();
GPRReg structureGPR = structure.gpr();
GPRReg scratchGPR = scratch.gpr();
- // Rare data is only used to access the allocator & structure
- // We can avoid using an additional GPR this way
- GPRReg rareDataGPR = structureGPR;
MacroAssembler::JumpList slowPath;
- m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR);
- slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR));
- m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
- m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR);
+ m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
+ m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR);
slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR));
emitAllocateJSObject(resultGPR, allocatorGPR, structureGPR, TrustedImmPtr(0), scratchGPR, slowPath);
@@ -3648,6 +3538,12 @@ void SpeculativeJIT::compile(Node* node)
break;
}
+ case AllocationProfileWatchpoint:
+ case TypedArrayWatchpoint: {
+ noResult(node);
+ break;
+ }
+
case NewObject: {
GPRTemporary result(this);
GPRTemporary allocator(this);
@@ -3679,44 +3575,90 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case GetArgumentCount: {
+ case GetScope: {
+ SpeculateCellOperand function(this, node->child1());
+ GPRTemporary result(this, Reuse, function);
+ m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr());
+ cellResult(result.gpr(), node);
+ break;
+ }
+
+ case GetMyScope: {
GPRTemporary result(this);
- m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), result.gpr());
- int32Result(result.gpr(), node);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::payloadFor(JSStack::ScopeChain), resultGPR);
+ cellResult(resultGPR, node);
break;
}
- case GetScope:
- compileGetScope(node);
+ case SkipTopScope: {
+ SpeculateCellOperand scope(this, node->child1());
+ GPRTemporary result(this, Reuse, scope);
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(scope.gpr(), resultGPR);
+ JITCompiler::Jump activationNotCreated =
+ m_jit.branchTestPtr(
+ JITCompiler::Zero,
+ JITCompiler::payloadFor(
+ static_cast<VirtualRegister>(m_jit.graph().machineActivationRegister())));
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, JSScope::offsetOfNext()), resultGPR);
+ activationNotCreated.link(&m_jit);
+ cellResult(resultGPR, node);
break;
+ }
- case SkipScope:
- compileSkipScope(node);
+ case SkipScope: {
+ SpeculateCellOperand scope(this, node->child1());
+ GPRTemporary result(this, Reuse, scope);
+ m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr());
+ cellResult(result.gpr(), node);
break;
+ }
+ case GetClosureRegisters: {
+ if (WriteBarrierBase<Unknown>* registers = m_jit.graph().tryGetRegisters(node->child1().node())) {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(TrustedImmPtr(registers), resultGPR);
+ storageResult(resultGPR, node);
+ break;
+ }
+
+ SpeculateCellOperand scope(this, node->child1());
+ GPRTemporary result(this);
+ GPRReg scopeGPR = scope.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::Address(scopeGPR, JSVariableObject::offsetOfRegisters()), resultGPR);
+ storageResult(resultGPR, node);
+ break;
+ }
case GetClosureVar: {
- SpeculateCellOperand base(this, node->child1());
+ StorageOperand registers(this, node->child1());
GPRTemporary resultTag(this);
GPRTemporary resultPayload(this);
- GPRReg baseGPR = base.gpr();
+ GPRReg registersGPR = registers.gpr();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- m_jit.load32(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + TagOffset), resultTagGPR);
- m_jit.load32(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + PayloadOffset), resultPayloadGPR);
+ m_jit.load32(JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ m_jit.load32(JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
}
-
case PutClosureVar: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
+ StorageOperand registers(this, node->child2());
+ JSValueOperand value(this, node->child3());
+ GPRTemporary scratchRegister(this);
- GPRReg baseGPR = base.gpr();
+ GPRReg registersGPR = registers.gpr();
GPRReg valueTagGPR = value.tagGPR();
GPRReg valuePayloadGPR = value.payloadGPR();
- m_jit.store32(valueTagGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + TagOffset));
- m_jit.store32(valuePayloadGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + PayloadOffset));
+ speculate(node, node->child1());
+
+ m_jit.store32(valueTagGPR, JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ m_jit.store32(valuePayloadGPR, JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
noResult(node);
break;
}
@@ -3727,8 +3669,8 @@ void SpeculativeJIT::compile(Node* node)
switch (node->child1().useKind()) {
case CellUse: {
SpeculateCellOperand base(this, node->child1());
- GPRTemporary resultTag(this);
- GPRTemporary resultPayload(this, Reuse, base);
+ GPRTemporary resultTag(this, Reuse, base);
+ GPRTemporary resultPayload(this);
GPRReg baseGPR = base.gpr();
GPRReg resultTagGPR = resultTag.gpr();
@@ -3736,7 +3678,7 @@ void SpeculativeJIT::compile(Node* node)
base.use();
- cachedGetById(node->origin.semantic, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber());
+ cachedGetById(node->codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber());
jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
break;
@@ -3744,8 +3686,8 @@ void SpeculativeJIT::compile(Node* node)
case UntypedUse: {
JSValueOperand base(this, node->child1());
- GPRTemporary resultTag(this);
- GPRTemporary resultPayload(this, Reuse, base, TagWord);
+ GPRTemporary resultTag(this, Reuse, base, TagWord);
+ GPRTemporary resultPayload(this);
GPRReg baseTagGPR = base.tagGPR();
GPRReg basePayloadGPR = base.payloadGPR();
@@ -3754,9 +3696,9 @@ void SpeculativeJIT::compile(Node* node)
base.use();
- JITCompiler::Jump notCell = m_jit.branchIfNotCell(base.jsValueRegs());
+ JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
- cachedGetById(node->origin.semantic, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell);
+ cachedGetById(node->codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell);
jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
break;
@@ -3781,16 +3723,16 @@ void SpeculativeJIT::compile(Node* node)
GPRReg baseGPR = base.gpr();
- GPRFlushedCallResult resultPayload(this);
- GPRFlushedCallResult2 resultTag(this);
- GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRResult resultTag(this);
+ GPRResult2 resultPayload(this);
GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
base.use();
flushRegisters();
- cachedGetById(node->origin.semantic, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill);
+ cachedGetById(node->codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill);
jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
break;
@@ -3801,18 +3743,18 @@ void SpeculativeJIT::compile(Node* node)
GPRReg baseTagGPR = base.tagGPR();
GPRReg basePayloadGPR = base.payloadGPR();
- GPRFlushedCallResult resultPayload(this);
- GPRFlushedCallResult2 resultTag(this);
- GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRResult resultTag(this);
+ GPRResult2 resultPayload(this);
GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
base.use();
flushRegisters();
- JITCompiler::Jump notCell = m_jit.branchIfNotCell(base.jsValueRegs());
+ JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
- cachedGetById(node->origin.semantic, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell, DontSpill);
+ cachedGetById(node->codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell, DontSpill);
jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
break;
@@ -3829,33 +3771,17 @@ void SpeculativeJIT::compile(Node* node)
compileGetArrayLength(node);
break;
- case CheckCell: {
- SpeculateCellOperand cell(this, node->child1());
- speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->cell()));
- noResult(node);
- break;
- }
-
- case CheckNotEmpty: {
- JSValueOperand operand(this, node->child1());
- GPRReg tagGPR = operand.tagGPR();
- speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::EmptyValueTag)));
+ case CheckFunction: {
+ SpeculateCellOperand function(this, node->child1());
+ speculationCheck(BadFunction, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node->function()));
noResult(node);
break;
}
- case CheckIdent:
- compileCheckIdent(node);
- break;
-
- case GetExecutable: {
+ case CheckExecutable: {
SpeculateCellOperand function(this, node->child1());
- GPRTemporary result(this, Reuse, function);
- GPRReg functionGPR = function.gpr();
- GPRReg resultGPR = result.gpr();
- speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType);
- m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR);
- cellResult(resultGPR, node);
+ speculationCheck(BadExecutable, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(function.gpr(), JSFunction::offsetOfExecutable()), node->executable()));
+ noResult(node);
break;
}
@@ -3869,12 +3795,12 @@ void SpeculativeJIT::compile(Node* node)
BadCache, JSValueSource::unboxedCell(base.gpr()), 0,
m_jit.branchWeakPtr(
JITCompiler::NotEqual,
- JITCompiler::Address(base.gpr(), JSCell::structureIDOffset()),
+ JITCompiler::Address(base.gpr(), JSCell::structureOffset()),
node->structureSet()[0]));
} else {
GPRTemporary structure(this);
- m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureIDOffset()), structure.gpr());
+ m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
JITCompiler::JumpList done;
@@ -3893,19 +3819,43 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case PutStructure: {
- Structure* oldStructure = node->transition()->previous;
- Structure* newStructure = node->transition()->next;
+ case StructureTransitionWatchpoint: {
+ // There is a fascinating question here of what to do about array profiling.
+ // We *could* try to tell the OSR exit about where the base of the access is.
+ // The DFG will have kept it alive, though it may not be in a register, and
+ // we shouldn't really load it since that could be a waste. For now though,
+ // we'll just rely on the fact that when a watchpoint fires then that's
+ // quite a hint already.
+
+ m_jit.addWeakReference(node->structure());
+
+#if !ASSERT_DISABLED
+ SpeculateCellOperand op1(this, node->child1());
+ JITCompiler::Jump isOK = m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(op1.gpr(), JSCell::structureOffset()), TrustedImmPtr(node->structure()));
+ m_jit.breakpoint();
+ isOK.link(&m_jit);
+#else
+ speculateCell(node->child1());
+#endif
+ noResult(node);
+ break;
+ }
+
+ case PhantomPutStructure: {
+ ASSERT(isKnownCell(node->child1().node()));
+ m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node);
+ noResult(node);
+ break;
+ }
+
+ case PutStructure: {
m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node);
SpeculateCellOperand base(this, node->child1());
GPRReg baseGPR = base.gpr();
- ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
- ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
- ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
- m_jit.storePtr(MacroAssembler::TrustedImmPtr(newStructure), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()));
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(node->structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
noResult(node);
break;
@@ -3956,7 +3906,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- StorageAccessData& storageAccessData = node->storageAccessData();
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()];
m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
@@ -3965,47 +3915,6 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case GetGetterSetterByOffset: {
- StorageOperand storage(this, node->child1());
- GPRTemporary resultPayload(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
-
- StorageAccessData& storageAccessData = node->storageAccessData();
-
- m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
-
- cellResult(resultPayloadGPR, node);
- break;
- }
-
- case GetGetter: {
- SpeculateCellOperand op1(this, node->child1());
- GPRTemporary result(this, Reuse, op1);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg resultGPR = result.gpr();
-
- m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfGetter()), resultGPR);
-
- cellResult(resultGPR, node);
- break;
- }
-
- case GetSetter: {
- SpeculateCellOperand op1(this, node->child1());
- GPRTemporary result(this, Reuse, op1);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg resultGPR = result.gpr();
-
- m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfSetter()), resultGPR);
-
- cellResult(resultGPR, node);
- break;
- }
-
case PutByOffset: {
StorageOperand storage(this, node->child1());
JSValueOperand value(this, node->child3());
@@ -4016,7 +3925,7 @@ void SpeculativeJIT::compile(Node* node)
speculate(node, node->child2());
- StorageAccessData& storageAccessData = node->storageAccessData();
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()];
m_jit.storePtr(valueTagGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
m_jit.storePtr(valuePayloadGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
@@ -4024,23 +3933,6 @@ void SpeculativeJIT::compile(Node* node)
noResult(node);
break;
}
-
- case PutByIdFlush: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
- GPRTemporary scratch(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRReg scratchGPR = scratch.gpr();
- flushRegisters();
-
- cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill);
-
- noResult(node);
- break;
- }
case PutById: {
SpeculateCellOperand base(this, node->child1());
@@ -4052,7 +3944,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg valuePayloadGPR = value.payloadGPR();
GPRReg scratchGPR = scratch.gpr();
- cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect);
+ cachedPutById(node->codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect);
noResult(node);
break;
@@ -4068,7 +3960,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg valuePayloadGPR = value.payloadGPR();
GPRReg scratchGPR = scratch.gpr();
- cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), Direct);
+ cachedPutById(node->codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), Direct);
noResult(node);
break;
@@ -4078,7 +3970,7 @@ void SpeculativeJIT::compile(Node* node)
GPRTemporary resultPayload(this);
GPRTemporary resultTag(this);
- m_jit.move(TrustedImmPtr(node->variablePointer()), resultPayload.gpr());
+ m_jit.move(TrustedImmPtr(node->registerPointer()), resultPayload.gpr());
m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTag.gpr());
m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayload.gpr());
@@ -4087,25 +3979,77 @@ void SpeculativeJIT::compile(Node* node)
}
case PutGlobalVar: {
- JSValueOperand value(this, node->child2());
+ JSValueOperand value(this, node->child1());
// FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have
// a spare register - a good optimization would be to put the register pointer into
// a register and then do a zero offset store followed by a four-offset store (or
// vice-versa depending on endianness).
- m_jit.store32(value.tagGPR(), node->variablePointer()->tagPointer());
- m_jit.store32(value.payloadGPR(), node->variablePointer()->payloadPointer());
+ m_jit.store32(value.tagGPR(), node->registerPointer()->tagPointer());
+ m_jit.store32(value.payloadGPR(), node->registerPointer()->payloadPointer());
noResult(node);
break;
}
case NotifyWrite: {
- compileNotifyWrite(node);
+ VariableWatchpointSet* set = node->variableWatchpointSet();
+
+ JSValueOperand value(this, node->child1());
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+
+ m_jit.load8(set->addressOfState(), tempGPR);
+
+ JITCompiler::JumpList ready;
+
+ ready.append(m_jit.branch32(JITCompiler::Equal, tempGPR, TrustedImm32(IsInvalidated)));
+
+ if (set->state() == ClearWatchpoint) {
+ JITCompiler::Jump isWatched =
+ m_jit.branch32(JITCompiler::NotEqual, tempGPR, TrustedImm32(ClearWatchpoint));
+
+ m_jit.store32(valueTagGPR, &set->addressOfInferredValue()->u.asBits.tag);
+ m_jit.store32(valuePayloadGPR, &set->addressOfInferredValue()->u.asBits.payload);
+ m_jit.store8(TrustedImm32(IsWatched), set->addressOfState());
+ ready.append(m_jit.jump());
+
+ isWatched.link(&m_jit);
+ }
+
+ JITCompiler::Jump definitelyNotEqual = m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.payload),
+ valuePayloadGPR);
+ ready.append(m_jit.branch32(
+ JITCompiler::Equal,
+ JITCompiler::AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.tag),
+ valueTagGPR));
+ definitelyNotEqual.link(&m_jit);
+
+ JITCompiler::Jump slowCase = m_jit.branchTest8(
+ JITCompiler::NonZero, JITCompiler::AbsoluteAddress(set->addressOfSetIsNotEmpty()));
+ m_jit.store8(TrustedImm32(IsInvalidated), set->addressOfState());
+ m_jit.store32(
+ TrustedImm32(JSValue::EmptyValueTag),
+ &set->addressOfInferredValue()->u.asBits.tag);
+ m_jit.store32(
+ TrustedImm32(0), &set->addressOfInferredValue()->u.asBits.payload);
+
+ ready.link(&m_jit);
+
+ addSlowPathGenerator(
+ slowPathCall(slowCase, this, operationInvalidate, NoResult, set));
+
+ noResult(node);
break;
}
- case VarInjectionWatchpoint: {
+ case VarInjectionWatchpoint:
+ case VariableWatchpoint: {
noResult(node);
break;
}
@@ -4115,10 +4059,8 @@ void SpeculativeJIT::compile(Node* node)
GPRTemporary structure(this);
// Speculate that base 'ImplementsDefaultHasInstance'.
- speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(
- MacroAssembler::Zero,
- MacroAssembler::Address(base.gpr(), JSCell::typeInfoFlagsOffset()),
- MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
+ m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
noResult(node);
break;
@@ -4135,7 +4077,7 @@ void SpeculativeJIT::compile(Node* node)
GPRTemporary localGlobalObject(this);
GPRTemporary remoteGlobalObject(this);
- JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
+ JITCompiler::Jump isCell = m_jit.branch32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag));
m_jit.compare32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::UndefinedTag), result.gpr());
JITCompiler::Jump done = m_jit.jump();
@@ -4146,18 +4088,15 @@ void SpeculativeJIT::compile(Node* node)
m_jit.move(TrustedImm32(0), result.gpr());
notMasqueradesAsUndefined = m_jit.jump();
} else {
- JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
- JITCompiler::NonZero,
- JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()),
- TrustedImm32(MasqueradesAsUndefined));
+ m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr());
+ JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
m_jit.move(TrustedImm32(0), result.gpr());
notMasqueradesAsUndefined = m_jit.jump();
isMasqueradesAsUndefined.link(&m_jit);
GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
- m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR);
- m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureIDOffset()), result.gpr());
+ m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->codeOrigin)), localGlobalObjectGPR);
m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR);
m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr());
}
@@ -4191,12 +4130,10 @@ void SpeculativeJIT::compile(Node* node)
JSValueOperand value(this, node->child1());
GPRTemporary result(this, Reuse, value, TagWord);
- JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
+ JITCompiler::Jump isNotCell = m_jit.branch32(JITCompiler::NotEqual, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag));
- m_jit.compare8(JITCompiler::Equal,
- JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()),
- TrustedImm32(StringType),
- result.gpr());
+ m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr());
+ m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr());
JITCompiler::Jump done = m_jit.jump();
isNotCell.link(&m_jit);
@@ -4209,35 +4146,86 @@ void SpeculativeJIT::compile(Node* node)
case IsObject: {
JSValueOperand value(this, node->child1());
- GPRTemporary result(this, Reuse, value, TagWord);
-
- JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
-
- m_jit.compare8(JITCompiler::AboveOrEqual,
- JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()),
- TrustedImm32(ObjectType),
- result.gpr());
- JITCompiler::Jump done = m_jit.jump();
-
- isNotCell.link(&m_jit);
- m_jit.move(TrustedImm32(0), result.gpr());
-
- done.link(&m_jit);
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(operationIsObject, resultGPR, valueTagGPR, valuePayloadGPR);
booleanResult(result.gpr(), node);
break;
}
- case IsObjectOrNull: {
- compileIsObjectOrNull(node);
- break;
- }
-
case IsFunction: {
- compileIsFunction(node);
+ JSValueOperand value(this, node->child1());
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(operationIsFunction, resultGPR, valueTagGPR, valuePayloadGPR);
+ booleanResult(result.gpr(), node);
break;
}
case TypeOf: {
- compileTypeOf(node);
+ JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
+ GPRReg tagGPR = value.tagGPR();
+ GPRReg payloadGPR = value.payloadGPR();
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ JITCompiler::JumpList doneJumps;
+
+ flushRegisters();
+
+ ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == CellUse || node->child1().useKind() == StringUse);
+
+ JITCompiler::Jump isNotCell = m_jit.branch32(JITCompiler::NotEqual, tagGPR, JITCompiler::TrustedImm32(JSValue::CellTag));
+ if (node->child1().useKind() != UntypedUse)
+ DFG_TYPE_CHECK(JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecCell, isNotCell);
+
+ if (!node->child1()->shouldSpeculateObject() || node->child1().useKind() == StringUse) {
+ m_jit.loadPtr(JITCompiler::Address(payloadGPR, JSCell::structureOffset()), tempGPR);
+ JITCompiler::Jump notString = m_jit.branch8(JITCompiler::NotEqual, JITCompiler::Address(tempGPR, Structure::typeInfoTypeOffset()), TrustedImm32(StringType));
+ if (node->child1().useKind() == StringUse)
+ DFG_TYPE_CHECK(JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecString, notString);
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.stringString()), resultGPR);
+ doneJumps.append(m_jit.jump());
+ if (node->child1().useKind() != StringUse) {
+ notString.link(&m_jit);
+ callOperation(operationTypeOf, resultGPR, payloadGPR);
+ doneJumps.append(m_jit.jump());
+ }
+ } else {
+ callOperation(operationTypeOf, resultGPR, payloadGPR);
+ doneJumps.append(m_jit.jump());
+ }
+
+ if (node->child1().useKind() == UntypedUse) {
+ isNotCell.link(&m_jit);
+
+ m_jit.add32(TrustedImm32(1), tagGPR, tempGPR);
+ JITCompiler::Jump notNumber = m_jit.branch32(JITCompiler::AboveOrEqual, tempGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.numberString()), resultGPR);
+ doneJumps.append(m_jit.jump());
+ notNumber.link(&m_jit);
+
+ JITCompiler::Jump notUndefined = m_jit.branch32(JITCompiler::NotEqual, tagGPR, TrustedImm32(JSValue::UndefinedTag));
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.undefinedString()), resultGPR);
+ doneJumps.append(m_jit.jump());
+ notUndefined.link(&m_jit);
+
+ JITCompiler::Jump notNull = m_jit.branch32(JITCompiler::NotEqual, tagGPR, TrustedImm32(JSValue::NullTag));
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.objectString()), resultGPR);
+ doneJumps.append(m_jit.jump());
+ notNull.link(&m_jit);
+
+ // Only boolean left
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.booleanString()), resultGPR);
+ }
+ doneJumps.link(&m_jit);
+ cellResult(resultGPR, node);
break;
}
@@ -4246,414 +4234,413 @@ void SpeculativeJIT::compile(Node* node)
case Call:
case Construct:
- case CallVarargs:
- case CallForwardVarargs:
- case ConstructVarargs:
- case ConstructForwardVarargs:
emitCall(node);
break;
- case LoadVarargs: {
- LoadVarargsData* data = node->loadVarargsData();
-
- GPRReg argumentsTagGPR;
- GPRReg argumentsPayloadGPR;
- {
- JSValueOperand arguments(this, node->child1());
- argumentsTagGPR = arguments.tagGPR();
- argumentsPayloadGPR = arguments.payloadGPR();
- flushRegisters();
- }
-
- callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, data->offset);
-
- lock(GPRInfo::returnValueGPR);
- {
- JSValueOperand arguments(this, node->child1());
- argumentsTagGPR = arguments.tagGPR();
- argumentsPayloadGPR = arguments.payloadGPR();
- flushRegisters();
- }
- unlock(GPRInfo::returnValueGPR);
+ case CreateActivation: {
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value, PayloadWord);
- // FIXME: There is a chance that we will call an effectful length property twice. This is safe
- // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
- // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
- // past the sizing.
- // https://bugs.webkit.org/show_bug.cgi?id=141448
-
- GPRReg argCountIncludingThisGPR =
- JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR);
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg resultGPR = result.gpr();
- m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR);
- speculationCheck(
- VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
- MacroAssembler::Above,
- argCountIncludingThisGPR,
- TrustedImm32(data->limit)));
+ m_jit.move(valuePayloadGPR, resultGPR);
- m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount));
+ JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
- callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsTagGPR, argumentsPayloadGPR, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum);
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationCreateActivation, resultGPR,
+ framePointerOffsetToGetActivationRegisters()));
- noResult(node);
+ cellResult(resultGPR, node);
break;
}
- case ForwardVarargs: {
- compileForwardVarargs(node);
+ case FunctionReentryWatchpoint: {
+ noResult(node);
break;
}
- case CreateActivation: {
- compileCreateActivation(node);
- break;
- }
+ case CreateArguments: {
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value, PayloadWord);
- case CreateDirectArguments: {
- compileCreateDirectArguments(node);
- break;
- }
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg resultGPR = result.gpr();
- case GetFromArguments: {
- compileGetFromArguments(node);
- break;
- }
+ m_jit.move(valuePayloadGPR, resultGPR);
- case PutToArguments: {
- compilePutToArguments(node);
- break;
- }
+ JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
- case CreateScopedArguments: {
- compileCreateScopedArguments(node);
+ if (node->codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationCreateInlinedArguments, resultGPR,
+ node->codeOrigin.inlineCallFrame));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(notCreated, this, operationCreateArguments, resultGPR));
+ }
+
+ cellResult(resultGPR, node);
break;
}
- case CreateClonedArguments: {
- compileCreateClonedArguments(node);
+ case TearOffActivation: {
+ JSValueOperand activationValue(this, node->child1());
+ GPRTemporary scratch(this);
+
+ GPRReg activationValueTagGPR = activationValue.tagGPR();
+ GPRReg activationValuePayloadGPR = activationValue.payloadGPR();
+ GPRReg scratchGPR = scratch.gpr();
+
+ JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, activationValueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
+
+ SymbolTable* symbolTable = m_jit.symbolTableFor(node->codeOrigin);
+ int registersOffset = JSActivation::registersOffset(symbolTable);
+
+ int bytecodeCaptureStart = symbolTable->captureStart();
+ int machineCaptureStart = m_jit.graph().m_machineCaptureStart;
+ for (int i = symbolTable->captureCount(); i--;) {
+ m_jit.loadPtr(
+ JITCompiler::Address(
+ GPRInfo::callFrameRegister, (machineCaptureStart - i) * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ scratchGPR);
+ m_jit.storePtr(
+ scratchGPR, JITCompiler::Address(
+ activationValuePayloadGPR, registersOffset + (bytecodeCaptureStart - i) * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ m_jit.loadPtr(
+ JITCompiler::Address(
+ GPRInfo::callFrameRegister, (machineCaptureStart - i) * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+ scratchGPR);
+ m_jit.storePtr(
+ scratchGPR, JITCompiler::Address(
+ activationValuePayloadGPR, registersOffset + (bytecodeCaptureStart - i) * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ }
+ m_jit.addPtr(TrustedImm32(registersOffset), activationValuePayloadGPR, scratchGPR);
+ m_jit.storePtr(scratchGPR, JITCompiler::Address(activationValuePayloadGPR, JSActivation::offsetOfRegisters()));
+
+ notCreated.link(&m_jit);
+ noResult(node);
break;
}
- case NewFunction:
- compileNewFunction(node);
- break;
+ case TearOffArguments: {
+ JSValueOperand unmodifiedArgumentsValue(this, node->child1());
+ JSValueOperand activationValue(this, node->child2());
+ GPRReg unmodifiedArgumentsValuePayloadGPR = unmodifiedArgumentsValue.payloadGPR();
+ GPRReg activationValuePayloadGPR = activationValue.payloadGPR();
- case In:
- compileIn(node);
+ JITCompiler::Jump created = m_jit.branchTest32(
+ JITCompiler::NonZero, unmodifiedArgumentsValuePayloadGPR);
+
+ if (node->codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffInlinedArguments, NoResult,
+ unmodifiedArgumentsValuePayloadGPR, activationValuePayloadGPR, node->codeOrigin.inlineCallFrame));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffArguments, NoResult,
+ unmodifiedArgumentsValuePayloadGPR, activationValuePayloadGPR));
+ }
+
+ noResult(node);
break;
-
- case StoreBarrier: {
- compileStoreBarrier(node);
+ }
+
+ case CheckArgumentsNotCreated: {
+ ASSERT(!isEmptySpeculation(
+ m_state.variables().operand(
+ m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type));
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag)));
+ noResult(node);
break;
}
-
- case GetEnumerableLength: {
- SpeculateCellOperand enumerator(this, node->child1());
- GPRFlushedCallResult result(this);
+
+ case GetMyArgumentsLength: {
+ GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
-
- m_jit.load32(MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR);
+
+ if (!isEmptySpeculation(
+ m_state.variables().operand(
+ m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) {
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), 0,
+ m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag)));
+ }
+
+ ASSERT(!node->codeOrigin.inlineCallFrame);
+ m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
+ m_jit.sub32(TrustedImm32(1), resultGPR);
int32Result(resultGPR, node);
break;
}
- case HasGenericProperty: {
- JSValueOperand base(this, node->child1());
- SpeculateCellOperand property(this, node->child2());
- GPRFlushedCallResult resultPayload(this);
- GPRFlushedCallResult2 resultTag(this);
- GPRReg basePayloadGPR = base.payloadGPR();
- GPRReg baseTagGPR = base.tagGPR();
- GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg resultTagGPR = resultTag.gpr();
-
- flushRegisters();
- callOperation(operationHasGenericProperty, resultTagGPR, resultPayloadGPR, baseTagGPR, basePayloadGPR, property.gpr());
- booleanResult(resultPayloadGPR, node);
- break;
- }
- case HasStructureProperty: {
- JSValueOperand base(this, node->child1());
- SpeculateCellOperand property(this, node->child2());
- SpeculateCellOperand enumerator(this, node->child3());
+
+ case GetMyArgumentsLengthSafe: {
GPRTemporary resultPayload(this);
GPRTemporary resultTag(this);
-
- GPRReg baseTagGPR = base.tagGPR();
- GPRReg basePayloadGPR = base.payloadGPR();
- GPRReg propertyGPR = property.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
GPRReg resultTagGPR = resultTag.gpr();
-
- m_jit.load32(MacroAssembler::Address(basePayloadGPR, JSCell::structureIDOffset()), resultTagGPR);
- MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual,
- resultTagGPR,
- MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset()));
-
- moveTrueTo(resultPayloadGPR);
- MacroAssembler::Jump done = m_jit.jump();
-
- done.link(&m_jit);
-
- addSlowPathGenerator(slowPathCall(wrongStructure, this, operationHasGenericProperty, resultTagGPR, resultPayloadGPR, baseTagGPR, basePayloadGPR, propertyGPR));
- booleanResult(resultPayloadGPR, node);
+
+ JITCompiler::Jump created = m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag));
+
+ if (node->codeOrigin.inlineCallFrame) {
+ m_jit.move(
+ Imm32(node->codeOrigin.inlineCallFrame->arguments.size() - 1),
+ resultPayloadGPR);
+ } else {
+ m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultPayloadGPR);
+ m_jit.sub32(TrustedImm32(1), resultPayloadGPR);
+ }
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+
+ // FIXME: the slow path generator should perform a forward speculation that the
+ // result is an integer. For now we postpone the speculation by having this return
+ // a JSValue.
+
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationGetArgumentsLength,
+ JSValueRegs(resultTagGPR, resultPayloadGPR),
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset()));
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
}
- case HasIndexedProperty: {
- SpeculateCellOperand base(this, node->child1());
- SpeculateInt32Operand index(this, node->child2());
+
+ case GetMyArgumentByVal: {
+ SpeculateStrictInt32Operand index(this, node->child1());
GPRTemporary resultPayload(this);
GPRTemporary resultTag(this);
-
- GPRReg baseGPR = base.gpr();
GPRReg indexGPR = index.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
GPRReg resultTagGPR = resultTag.gpr();
-
- MacroAssembler::JumpList slowCases;
- ArrayMode mode = node->arrayMode();
- switch (mode.type()) {
- case Array::Int32:
- case Array::Contiguous: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- GPRTemporary scratch(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())));
- m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
- slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag)));
- break;
+
+ if (!isEmptySpeculation(
+ m_state.variables().operand(
+ m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) {
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), 0,
+ m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag)));
}
- case Array::Double: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- FPRTemporary scratch(this);
- FPRReg scratchFPR = scratch.fpr();
- GPRReg storageGPR = storage.gpr();
-
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())));
- m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR);
- slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR));
- break;
+
+ m_jit.add32(TrustedImm32(1), indexGPR, resultPayloadGPR);
+
+ if (node->codeOrigin.inlineCallFrame) {
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultPayloadGPR,
+ Imm32(node->codeOrigin.inlineCallFrame->arguments.size())));
+ } else {
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultPayloadGPR,
+ JITCompiler::payloadFor(JSStack::ArgumentCount)));
}
- case Array::ArrayStorage: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- GPRTemporary scratch(this);
+
+ JITCompiler::JumpList slowArgument;
+ JITCompiler::JumpList slowArgumentOutOfBounds;
+ if (m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) {
+ RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame);
+ const SlowArgument* slowArguments = m_jit.graph().m_slowArguments.get();
+ slowArgumentOutOfBounds.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual, indexGPR,
+ Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount())));
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
+ COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes);
+ m_jit.move(ImmPtr(slowArguments), resultPayloadGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ resultPayloadGPR, indexGPR, JITCompiler::TimesEight,
+ OBJECT_OFFSETOF(SlowArgument, index)),
+ resultPayloadGPR);
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
- m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
- slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag)));
- break;
- }
- default: {
- slowCases.append(m_jit.jump());
- break;
- }
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ resultTagGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+ resultPayloadGPR);
+ slowArgument.append(m_jit.jump());
}
-
- moveTrueTo(resultPayloadGPR);
- MacroAssembler::Jump done = m_jit.jump();
-
- addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedProperty, resultTagGPR, resultPayloadGPR, baseGPR, indexGPR));
-
- done.link(&m_jit);
- booleanResult(resultPayloadGPR, node);
+ slowArgumentOutOfBounds.link(&m_jit);
+
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ resultTagGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+ resultPayloadGPR);
+
+ slowArgument.link(&m_jit);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
}
- case GetDirectPname: {
- Edge& baseEdge = m_jit.graph().varArgChild(node, 0);
- Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
-
- SpeculateCellOperand base(this, baseEdge);
- SpeculateCellOperand property(this, propertyEdge);
- GPRReg baseGPR = base.gpr();
- GPRReg propertyGPR = property.gpr();
-
-#if CPU(X86)
- GPRFlushedCallResult resultPayload(this);
- GPRFlushedCallResult2 resultTag(this);
- GPRTemporary scratch(this);
-
- GPRReg resultTagGPR = resultTag.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- // Not enough registers on X86 for this code, so always use the slow path.
- flushRegisters();
- m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), scratchGPR);
- callOperation(operationGetByValCell, resultTagGPR, resultPayloadGPR, baseGPR, scratchGPR, propertyGPR);
-#else
+ case GetMyArgumentByValSafe: {
+ SpeculateStrictInt32Operand index(this, node->child1());
GPRTemporary resultPayload(this);
GPRTemporary resultTag(this);
- GPRTemporary scratch(this);
-
- GPRReg resultTagGPR = resultTag.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- Edge& indexEdge = m_jit.graph().varArgChild(node, 2);
- Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
-
- SpeculateInt32Operand index(this, indexEdge);
- SpeculateCellOperand enumerator(this, enumeratorEdge);
-
GPRReg indexGPR = index.gpr();
- GPRReg enumeratorGPR = enumerator.gpr();
-
- // Check the structure
- m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratchGPR);
- MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual,
- scratchGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset()));
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
- // Compute the offset
- // If index is less than the enumerator's cached inline storage, then it's an inline access
- MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual,
- indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
-
- m_jit.move(indexGPR, scratchGPR);
- m_jit.signExtend32ToPtr(scratchGPR, scratchGPR);
- m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagGPR);
- m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadGPR);
-
- MacroAssembler::Jump done = m_jit.jump();
+ JITCompiler::JumpList slowPath;
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag)));
- // Otherwise it's out of line
- outOfLineAccess.link(&m_jit);
- m_jit.move(indexGPR, scratchGPR);
- m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratchGPR);
- m_jit.neg32(scratchGPR);
- m_jit.signExtend32ToPtr(scratchGPR, scratchGPR);
- // We use resultPayloadGPR as a temporary here. We have to make sure clobber it after getting the
- // value out of indexGPR and enumeratorGPR because resultPayloadGPR could reuse either of those registers.
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultPayloadGPR);
- int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
- m_jit.load32(MacroAssembler::BaseIndex(resultPayloadGPR, scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagGPR);
- m_jit.load32(MacroAssembler::BaseIndex(resultPayloadGPR, scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadGPR);
-
- done.link(&m_jit);
-
- addSlowPathGenerator(slowPathCall(wrongStructure, this, operationGetByValCell, resultTagGPR, resultPayloadGPR, baseGPR, propertyGPR));
-#endif
+ m_jit.add32(TrustedImm32(1), indexGPR, resultPayloadGPR);
+ if (node->codeOrigin.inlineCallFrame) {
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultPayloadGPR,
+ Imm32(node->codeOrigin.inlineCallFrame->arguments.size())));
+ } else {
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultPayloadGPR,
+ JITCompiler::payloadFor(JSStack::ArgumentCount)));
+ }
+
+ JITCompiler::JumpList slowArgument;
+ JITCompiler::JumpList slowArgumentOutOfBounds;
+ if (m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) {
+ RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame);
+ const SlowArgument* slowArguments = m_jit.graph().m_slowArguments.get();
+ slowArgumentOutOfBounds.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual, indexGPR,
+ Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount())));
+ COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes);
+ m_jit.move(ImmPtr(slowArguments), resultPayloadGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ resultPayloadGPR, indexGPR, JITCompiler::TimesEight,
+ OBJECT_OFFSETOF(SlowArgument, index)),
+ resultPayloadGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ resultTagGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+ resultPayloadGPR);
+ slowArgument.append(m_jit.jump());
+ }
+ slowArgumentOutOfBounds.link(&m_jit);
+
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ resultTagGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+ resultPayloadGPR);
+
+ if (node->codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationGetInlinedArgumentByVal,
+ JSValueRegs(resultTagGPR, resultPayloadGPR),
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset(),
+ node->codeOrigin.inlineCallFrame, indexGPR));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationGetArgumentByVal,
+ JSValueRegs(resultTagGPR, resultPayloadGPR),
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset(),
+ indexGPR));
+ }
+
+ slowArgument.link(&m_jit);
jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
}
- case GetPropertyEnumerator: {
- SpeculateCellOperand base(this, node->child1());
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
- callOperation(operationGetPropertyEnumerator, resultGPR, base.gpr());
- cellResult(resultGPR, node);
+
+ case NewFunctionNoCheck:
+ compileNewFunctionNoCheck(node);
break;
- }
- case GetEnumeratorStructurePname:
- case GetEnumeratorGenericPname: {
- SpeculateCellOperand enumerator(this, node->child1());
- SpeculateInt32Operand index(this, node->child2());
- GPRTemporary scratch(this);
- GPRTemporary resultPayload(this);
- GPRTemporary resultTag(this);
-
- GPRReg enumeratorGPR = enumerator.gpr();
- GPRReg indexGPR = index.gpr();
- GPRReg scratchGPR = scratch.gpr();
+
+ case NewFunction: {
+ JSValueOperand value(this, node->child1());
+ GPRTemporary resultTag(this, Reuse, value, TagWord);
+ GPRTemporary resultPayload(this, Reuse, value, PayloadWord);
+
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
-
- MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, indexGPR,
- MacroAssembler::Address(enumeratorGPR, (op == GetEnumeratorStructurePname)
- ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset()
- : JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
-
- m_jit.move(MacroAssembler::TrustedImm32(JSValue::NullTag), resultTagGPR);
- m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR);
-
- MacroAssembler::Jump done = m_jit.jump();
- inBounds.link(&m_jit);
-
- m_jit.loadPtr(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratchGPR);
- m_jit.loadPtr(MacroAssembler::BaseIndex(scratchGPR, indexGPR, MacroAssembler::ScalePtr), resultPayloadGPR);
- m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), resultTagGPR);
-
- done.link(&m_jit);
+
+ m_jit.move(valuePayloadGPR, resultPayloadGPR);
+ m_jit.move(valueTagGPR, resultTagGPR);
+
+ JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
+
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationNewFunction, JSValueRegs(resultTagGPR, resultPayloadGPR),
+ m_jit.codeBlock()->functionDecl(node->functionDeclIndex())));
+
jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
}
- case ToIndexString: {
- SpeculateInt32Operand index(this, node->child1());
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
- callOperation(operationToIndexString, resultGPR, index.gpr());
- cellResult(resultGPR, node);
+
+ case NewFunctionExpression:
+ compileNewFunctionExpression(node);
break;
- }
- case ProfileType: {
- JSValueOperand value(this, node->child1());
- GPRTemporary scratch1(this);
- GPRTemporary scratch2(this);
- GPRTemporary scratch3(this);
-
- GPRReg scratch1GPR = scratch1.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
- GPRReg scratch3GPR = scratch3.gpr();
-
- JITCompiler::Jump isTDZValue = m_jit.branch32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::EmptyValueTag));
-
- // Load the TypeProfilerLog into Scratch2.
- TypeProfilerLog* cachedTypeProfilerLog = m_jit.vm()->typeProfilerLog();
- m_jit.move(TrustedImmPtr(cachedTypeProfilerLog), scratch2GPR);
-
- // Load the next LogEntry into Scratch1.
- m_jit.loadPtr(MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR);
-
- // Store the JSValue onto the log entry.
- m_jit.store32(value.tagGPR(), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
- m_jit.store32(value.payloadGPR(), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-
- // Store the structureID of the cell if valueGPR is a cell, otherwise, store 0 on the log entry.
- MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
- m_jit.load32(MacroAssembler::Address(value.payloadGPR(), JSCell::structureIDOffset()), scratch3GPR);
- m_jit.store32(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
- MacroAssembler::Jump skipIsCell = m_jit.jump();
- isNotCell.link(&m_jit);
- m_jit.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
- skipIsCell.link(&m_jit);
-
- // Store the typeLocation on the log entry.
- TypeLocation* cachedTypeLocation = node->typeLocation();
- m_jit.move(TrustedImmPtr(cachedTypeLocation), scratch3GPR);
- m_jit.storePtr(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::locationOffset()));
-
- // Increment the current log entry.
- m_jit.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), scratch1GPR);
- m_jit.storePtr(scratch1GPR, MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()));
- MacroAssembler::Jump clearLog = m_jit.branchPtr(MacroAssembler::Equal, scratch1GPR, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
- addSlowPathGenerator(
- slowPathCall(clearLog, this, operationProcessTypeProfilerLogDFG, NoResult));
-
- isTDZValue.link(&m_jit);
-
- noResult(node);
+
+ case In:
+ compileIn(node);
break;
- }
- case ProfileControlFlow: {
- BasicBlockLocation* basicBlockLocation = node->basicBlockLocation();
- if (!basicBlockLocation->hasExecuted()) {
- GPRTemporary scratch1(this);
- basicBlockLocation->emitExecuteCode(m_jit, scratch1.gpr());
- }
- noResult(node);
+
+ case StoreBarrier:
+ case ConditionalStoreBarrier:
+ case StoreBarrierWithNullCheck: {
+ compileStoreBarrier(node);
break;
}
@@ -4667,12 +4654,11 @@ void SpeculativeJIT::compile(Node* node)
break;
case CheckWatchdogTimer:
- ASSERT(m_jit.vm()->watchdog);
speculationCheck(
WatchdogTimerFired, JSValueRegs(), 0,
m_jit.branchTest8(
JITCompiler::NonZero,
- JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog->timerDidFireAddress())));
+ JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog.timerDidFireAddress())));
break;
case CountExecution:
@@ -4680,7 +4666,6 @@ void SpeculativeJIT::compile(Node* node)
break;
case Phantom:
- case Check:
DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
noResult(node);
break;
@@ -4693,8 +4678,7 @@ void SpeculativeJIT::compile(Node* node)
// This is a no-op.
noResult(node);
break;
-
-
+
case Unreachable:
RELEASE_ASSERT_NOT_REACHED();
break;
@@ -4702,32 +4686,16 @@ void SpeculativeJIT::compile(Node* node)
case LastNodeType:
case Phi:
case Upsilon:
+ case GetArgument:
case ExtractOSREntryLocal:
case CheckTierUpInLoop:
case CheckTierUpAtReturn:
case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter:
- case Int52Rep:
- case FiatInt52:
- case Int52Constant:
+ case Int52ToDouble:
+ case Int52ToValue:
case CheckInBounds:
case ArithIMul:
- case MultiGetByOffset:
- case MultiPutByOffset:
- case CheckBadCell:
- case BottomValue:
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case PutHint:
- case CheckStructureImmediate:
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- case PutStack:
- case KillStack:
- case GetStack:
- case GetMyArgumentByVal:
- DFG_CRASH(m_jit.graph(), node, "unexpected node in DFG backend");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
@@ -4745,28 +4713,28 @@ void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueTagGPR, Edge valu
if (!isKnownCell(valueUse.node()))
isNotCell = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::CellTag));
- JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
+ JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, ownerGPR, scratch1, scratch2);
storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
- ownerIsRememberedOrInEden.link(&m_jit);
+ definitelyNotMarked.link(&m_jit);
if (!isKnownCell(valueUse.node()))
isNotCell.link(&m_jit);
}
-#endif // ENABLE(GGC)
-void SpeculativeJIT::moveTrueTo(GPRReg gpr)
+void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueTagGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2)
{
- m_jit.move(TrustedImm32(1), gpr);
-}
+ JITCompiler::Jump isNotCell;
+ if (!isKnownCell(valueUse.node()))
+ isNotCell = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::CellTag));
-void SpeculativeJIT::moveFalseTo(GPRReg gpr)
-{
- m_jit.move(TrustedImm32(0), gpr);
-}
+ JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, owner);
+ storeToWriteBarrierBuffer(owner, scratch1, scratch2);
+ definitelyNotMarked.link(&m_jit);
-void SpeculativeJIT::blessBoolean(GPRReg)
-{
+ if (!isKnownCell(valueUse.node()))
+ isNotCell.link(&m_jit);
}
+#endif // ENABLE(GGC)
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index a3e9673d7..ea9f88613 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,23 +28,15 @@
#if ENABLE(DFG_JIT)
+#include "Arguments.h"
#include "ArrayPrototype.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGOperations.h"
#include "DFGSlowPathGenerator.h"
#include "Debugger.h"
-#include "DirectArguments.h"
-#include "GetterSetter.h"
-#include "JSCInlines.h"
-#include "JSEnvironmentRecord.h"
-#include "JSLexicalEnvironment.h"
-#include "JSPropertyNameEnumerator.h"
+#include "JSCJSValueInlines.h"
#include "ObjectPrototype.h"
-#include "SetupVarargsFrame.h"
-#include "SpillRegistersMode.h"
-#include "TypeProfilerLog.h"
-#include "Watchdog.h"
namespace JSC { namespace DFG {
@@ -86,9 +78,21 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge)
GPRReg gpr = allocate();
if (edge->hasConstant()) {
- JSValue jsValue = edge->asJSValue();
- m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
- info.fillJSValue(*m_stream, gpr, DataFormatJS);
+ if (isInt32Constant(edge.node())) {
+ info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
+ JSValue jsValue = jsNumber(valueOfInt32Constant(edge.node()));
+ m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
+ } else if (isNumberConstant(edge.node())) {
+ info.fillJSValue(*m_stream, gpr, DataFormatJSDouble);
+ JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(edge.node()));
+ m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
+ } else {
+ ASSERT(isJSConstant(edge.node()));
+ JSValue jsValue = valueOfJSConstant(edge.node());
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
+ info.fillJSValue(*m_stream, gpr, DataFormatJS);
+ }
+
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
} else {
DataFormat spillFormat = info.spillFormat();
@@ -101,9 +105,21 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge)
break;
}
+ case DataFormatInt52:
+ case DataFormatStrictInt52: {
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
+ boxInt52(gpr, gpr, spillFormat);
+ return gpr;
+ }
+
default:
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
- DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat & DataFormatJS);
+ if (spillFormat == DataFormatDouble) {
+ // Need to box the double, since we want a JSValue.
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
+ spillFormat = DataFormatJSDouble;
+ } else
+ RELEASE_ASSERT(spillFormat & DataFormatJS);
break;
}
info.fillJSValue(*m_stream, gpr, spillFormat);
@@ -126,6 +142,28 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge)
return gpr;
}
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ GPRReg gpr = boxDouble(fpr);
+
+ // Update all info
+ info.fillJSValue(*m_stream, gpr, DataFormatJSDouble);
+ m_fprs.release(fpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderJS);
+
+ return gpr;
+ }
+
+ case DataFormatInt52:
+ case DataFormatStrictInt52: {
+ GPRReg gpr = info.gpr();
+ lock(gpr);
+ GPRReg resultGPR = allocate();
+ boxInt52(gpr, resultGPR, info.registerFormat());
+ unlock(gpr);
+ return resultGPR;
+ }
+
case DataFormatCell:
// No retag required on JSVALUE64!
case DataFormatJS:
@@ -140,13 +178,11 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge)
case DataFormatBoolean:
case DataFormatStorage:
- case DataFormatDouble:
- case DataFormatInt52:
// this type currently never occurs
- DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
+ RELEASE_ASSERT_NOT_REACHED();
default:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
+ RELEASE_ASSERT_NOT_REACHED();
return InvalidGPRReg;
}
}
@@ -154,8 +190,8 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge)
void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITGetByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, usedRegisters(), JSValueRegs(baseGPR),
- JSValueRegs(resultGPR), spillMode);
+ m_jit.codeBlock(), codeOrigin, usedRegisters(), GPRInfo::callFrameRegister,
+ JSValueRegs(baseGPR), JSValueRegs(resultGPR), spillMode != NeedToSpill);
gen.generateFastPath(m_jit);
JITCompiler::JumpList slowCases;
@@ -163,20 +199,20 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
slowCases.append(slowPathTarget);
slowCases.append(gen.slowPathJump());
- auto slowPath = slowPathCall(
+ OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
slowCases, this, operationGetByIdOptimize, resultGPR, gen.stubInfo(), baseGPR,
identifierUID(identifierNumber), spillMode);
m_jit.addGetById(gen, slowPath.get());
- addSlowPathGenerator(WTF::move(slowPath));
+ addSlowPathGenerator(slowPath.release());
}
-void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
JITPutByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, usedRegisters(), JSValueRegs(baseGPR),
- JSValueRegs(valueGPR), scratchGPR, spillMode, m_jit.ecmaModeFor(codeOrigin), putKind);
-
+ m_jit.codeBlock(), codeOrigin, usedRegisters(), GPRInfo::callFrameRegister,
+ JSValueRegs(baseGPR), JSValueRegs(valueGPR), scratchGPR, false,
+ m_jit.ecmaModeFor(codeOrigin), putKind);
gen.generateFastPath(m_jit);
JITCompiler::JumpList slowCases;
@@ -184,12 +220,12 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
slowCases.append(slowPathTarget);
slowCases.append(gen.slowPathJump());
- auto slowPath = slowPathCall(
+ OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueGPR, baseGPR,
identifierUID(identifierNumber));
m_jit.addPutById(gen, slowPath.get());
- addSlowPathGenerator(WTF::move(slowPath));
+ addSlowPathGenerator(slowPath.release());
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
@@ -205,22 +241,19 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
JITCompiler::Jump notMasqueradesAsUndefined;
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
if (!isKnownCell(operand.node()))
- notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
+ notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR);
notMasqueradesAsUndefined = m_jit.jump();
} else {
GPRTemporary localGlobalObject(this);
GPRTemporary remoteGlobalObject(this);
- GPRTemporary scratch(this);
if (!isKnownCell(operand.node()))
- notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
-
- JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
- JITCompiler::NonZero,
- JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
- JITCompiler::TrustedImm32(MasqueradesAsUndefined));
+ notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+
+ m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
+ JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined));
m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR);
notMasqueradesAsUndefined = m_jit.jump();
@@ -228,8 +261,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
isMasqueradesAsUndefined.link(&m_jit);
GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
- m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
- m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr());
+ m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(operand->codeOrigin)), localGlobalObjectGPR);
m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR);
}
@@ -254,8 +286,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
if (taken == nextBlock()) {
invert = !invert;
@@ -274,26 +306,22 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branch
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
if (!isKnownCell(operand.node()))
- notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
-
+ notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+
jump(invert ? taken : notTaken, ForceJump);
} else {
GPRTemporary localGlobalObject(this);
GPRTemporary remoteGlobalObject(this);
- GPRTemporary scratch(this);
if (!isKnownCell(operand.node()))
- notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
-
- branchTest8(JITCompiler::Zero,
- JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
- JITCompiler::TrustedImm32(MasqueradesAsUndefined),
- invert ? taken : notTaken);
+ notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+
+ m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
+ branchTest8(JITCompiler::Zero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), invert ? taken : notTaken);
GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
- m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
- m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr());
+ m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(operand->codeOrigin)), localGlobalObjectGPR);
m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
}
@@ -317,7 +345,7 @@ bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool in
if (branchIndexInBlock != UINT_MAX) {
Node* branchNode = m_block->at(branchIndexInBlock);
- DFG_ASSERT(m_jit.graph(), node, node->adjustedRefCount() == 1);
+ RELEASE_ASSERT(node->adjustedRefCount() == 1);
nonSpeculativePeepholeBranchNull(operand, branchNode, invert);
@@ -336,8 +364,8 @@ bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool in
void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
@@ -359,7 +387,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode,
JITCompiler::JumpList slowPath;
if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
arg1.use();
@@ -442,7 +470,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler
JITCompiler::JumpList slowPath;
if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
arg1.use();
@@ -469,8 +497,9 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler
m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
- addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
- slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR));
+ addSlowPathGenerator(adoptPtr(
+ new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>(
+ slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR)));
}
jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
@@ -479,8 +508,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler
void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
@@ -600,9 +629,11 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
- addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>>(
+ addSlowPathGenerator(
+ adoptPtr(
+ new CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>(
slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR,
- arg2GPR));
+ arg2GPR)));
done.link(&m_jit);
}
@@ -610,181 +641,65 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::compileMiscStrictEq(Node* node)
-{
- JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
- JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
- GPRTemporary result(this);
-
- if (node->child1().useKind() == MiscUse)
- speculateMisc(node->child1(), op1.jsValueRegs());
- if (node->child2().useKind() == MiscUse)
- speculateMisc(node->child2(), op2.jsValueRegs());
-
- m_jit.compare64(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());
- m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
- jsValueResult(result.gpr(), node, DataFormatJSBoolean);
-}
-
void SpeculativeJIT::emitCall(Node* node)
{
- CallLinkInfo::CallType callType;
- bool isVarargs = false;
- bool isForwardVarargs = false;
- switch (node->op()) {
- case Call:
- callType = CallLinkInfo::Call;
- break;
- case Construct:
- callType = CallLinkInfo::Construct;
- break;
- case CallVarargs:
- callType = CallLinkInfo::CallVarargs;
- isVarargs = true;
- break;
- case ConstructVarargs:
- callType = CallLinkInfo::ConstructVarargs;
- isVarargs = true;
- break;
- case CallForwardVarargs:
- callType = CallLinkInfo::CallVarargs;
- isForwardVarargs = true;
- break;
- case ConstructForwardVarargs:
- callType = CallLinkInfo::ConstructVarargs;
- isForwardVarargs = true;
- break;
- default:
- DFG_CRASH(m_jit.graph(), node, "bad node type");
- break;
- }
+ if (node->op() != Call)
+ RELEASE_ASSERT(node->op() == Construct);
- Edge calleeEdge = m_jit.graph().child(node, 0);
+ // For constructors, the this argument is not passed but we have to make space
+ // for it.
+ int dummyThisArgument = node->op() == Call ? 0 : 1;
- // Gotta load the arguments somehow. Varargs is trickier.
- if (isVarargs || isForwardVarargs) {
- CallVarargsData* data = node->callVarargsData();
-
- GPRReg resultGPR;
- unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;
-
- if (isForwardVarargs) {
- flushRegisters();
- use(node->child2());
-
- GPRReg scratchGPR1;
- GPRReg scratchGPR2;
- GPRReg scratchGPR3;
-
- scratchGPR1 = JITCompiler::selectScratchGPR();
- scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
- scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);
-
- m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
- JITCompiler::JumpList slowCase;
- emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
- JITCompiler::Jump done = m_jit.jump();
- slowCase.link(&m_jit);
- callOperation(operationThrowStackOverflowForVarargs);
- m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
- done.link(&m_jit);
- resultGPR = scratchGPR2;
- } else {
- GPRReg argumentsGPR;
- GPRReg scratchGPR1;
- GPRReg scratchGPR2;
- GPRReg scratchGPR3;
-
- auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
- if (reservedGPR != InvalidGPRReg)
- lock(reservedGPR);
- JSValueOperand arguments(this, node->child2());
- argumentsGPR = arguments.gpr();
- if (reservedGPR != InvalidGPRReg)
- unlock(reservedGPR);
- flushRegisters();
-
- scratchGPR1 = JITCompiler::selectScratchGPR(argumentsGPR, reservedGPR);
- scratchGPR2 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, reservedGPR);
- scratchGPR3 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, scratchGPR2, reservedGPR);
- };
-
- loadArgumentsGPR(InvalidGPRReg);
-
- DFG_ASSERT(m_jit.graph(), node, isFlushed());
-
- // Right now, arguments is in argumentsGPR and the register file is flushed.
- callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsGPR, numUsedStackSlots, data->firstVarArgOffset);
-
- // Now we have the argument count of the callee frame, but we've lost the arguments operand.
- // Reconstruct the arguments operand while preserving the callee frame.
- loadArgumentsGPR(GPRInfo::returnValueGPR);
- m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
- emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
- m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);
-
- callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR);
- resultGPR = GPRInfo::returnValueGPR;
- }
-
- m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister);
-
- DFG_ASSERT(m_jit.graph(), node, isFlushed());
-
- // We don't need the arguments array anymore.
- if (isVarargs)
- use(node->child2());
-
- // Now set up the "this" argument.
- JSValueOperand thisArgument(this, node->child3());
- GPRReg thisArgumentGPR = thisArgument.gpr();
- thisArgument.use();
-
- m_jit.store64(thisArgumentGPR, JITCompiler::calleeArgumentSlot(0));
- } else {
- // The call instruction's first child is the function; the subsequent children are the
- // arguments.
- int numPassedArgs = node->numChildren() - 1;
-
- m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), JITCompiler::calleeFramePayloadSlot(JSStack::ArgumentCount));
+ CallLinkInfo::CallType callType = node->op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
- for (int i = 0; i < numPassedArgs; i++) {
- Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
- JSValueOperand arg(this, argEdge);
- GPRReg argGPR = arg.gpr();
- use(argEdge);
-
- m_jit.store64(argGPR, JITCompiler::calleeArgumentSlot(i));
- }
- }
-
+ Edge calleeEdge = m_jit.graph().m_varArgChildren[node->firstChild()];
JSValueOperand callee(this, calleeEdge);
GPRReg calleeGPR = callee.gpr();
- callee.use();
- m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(JSStack::Callee));
+ use(calleeEdge);
+
+ // The call instruction's first child is the function; the subsequent children are the
+ // arguments.
+ int numPassedArgs = node->numChildren() - 1;
+ int numArgs = numPassedArgs + dummyThisArgument;
+
+ m_jit.store32(MacroAssembler::TrustedImm32(numArgs), calleeFramePayloadSlot(numArgs, JSStack::ArgumentCount));
+ m_jit.store64(GPRInfo::callFrameRegister, calleeFrameCallerFrame(numArgs));
+ m_jit.store64(calleeGPR, calleeFrameSlot(numArgs, JSStack::Callee));
+
+ for (int i = 0; i < numPassedArgs; i++) {
+ Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
+ JSValueOperand arg(this, argEdge);
+ GPRReg argGPR = arg.gpr();
+ use(argEdge);
+
+ m_jit.store64(argGPR, calleeArgumentSlot(numArgs, i + dummyThisArgument));
+ }
+
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
JITCompiler::DataLabelPtr targetToCheck;
- JITCompiler::Jump slowPath;
+ JITCompiler::JumpList slowPath;
- m_jit.emitStoreCodeOrigin(node->origin.semantic);
+ m_jit.emitStoreCodeOrigin(node->codeOrigin);
- CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo();
+ m_jit.addPtr(TrustedImm32(calleeFrameOffset(numArgs)), GPRInfo::callFrameRegister);
- slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0));
+ slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0)));
- JITCompiler::Call fastCall = m_jit.nearCall();
+ m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR);
+ m_jit.store64(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
+ JITCompiler::Call fastCall = m_jit.nearCall();
+
JITCompiler::Jump done = m_jit.jump();
slowPath.link(&m_jit);
m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0
- m_jit.move(MacroAssembler::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2
JITCompiler::Call slowCall = m_jit.nearCall();
done.link(&m_jit);
@@ -793,12 +708,7 @@ void SpeculativeJIT::emitCall(Node* node)
jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);
- callLinkInfo->setUpCall(callType, m_currentNode->origin.semantic, calleeGPR);
- m_jit.addJSCall(fastCall, slowCall, targetToCheck, callLinkInfo);
-
- // If we were varargs, then after the calls are done, we need to reestablish our stack pointer.
- if (isVarargs || isForwardVarargs)
- m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
+ m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, calleeGPR, m_currentNode->codeOrigin);
}
// Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it
@@ -815,25 +725,24 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF
AbstractValue& value = m_state.forNode(edge);
SpeculatedType type = value.m_type;
ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));
-
m_interpreter.filter(value, SpecInt32);
- if (value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
- returnFormat = DataFormatInt32;
- return allocate();
- }
-
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
+ if ((edge->hasConstant() && !isInt32Constant(edge.node())) || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ returnFormat = DataFormatInt32;
+ return allocate();
+ }
+
GPRReg gpr = allocate();
if (edge->hasConstant()) {
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- ASSERT(edge->isInt32Constant());
- m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
+ ASSERT(isInt32Constant(edge.node()));
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
info.fillInt32(*m_stream, gpr);
returnFormat = DataFormatInt32;
return gpr;
@@ -841,7 +750,7 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF
DataFormat spillFormat = info.spillFormat();
- DFG_ASSERT(m_jit.graph(), m_currentNode, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);
+ RELEASE_ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32 || spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
@@ -862,6 +771,36 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF
returnFormat = DataFormatJSInt32;
return gpr;
}
+ if (spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52) {
+ // Generally, this can only happen if we've already proved that the
+ // value is an int32. That's because if a value originated as a JSValue
+ // then we would speculate that it's an int32 before representing it as
+ // an int52. Otherwise, if we knowingly produced an int52, then we would
+ // be boxing it into a value using Int52ToValue. This assertion is valid
+ // only because Int52 is something that we introduce at prediction time.
+ // However: we may have an int32-producing node replaced by an
+ // int52-producing node due to CSE. So we must do a check.
+ RELEASE_ASSERT(!(type & ~SpecMachineInt));
+ if (type & SpecInt52) {
+ GPRReg temp = allocate();
+ m_jit.signExtend32ToPtr(gpr, temp);
+ // Currently, we can't supply value profiling information here. :-/
+ speculationCheck(
+ BadType, JSValueRegs(), 0,
+ m_jit.branch64(MacroAssembler::NotEqual, gpr, temp));
+ unlock(temp);
+ }
+ if (spillFormat == DataFormatStrictInt52)
+ m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
+ else {
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
+ m_jit.zeroExtend32ToPtr(gpr, gpr);
+ }
+ info.fillInt32(*m_stream, gpr);
+ returnFormat = DataFormatInt32;
+ return gpr;
+ }
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
// Fill as JSValue, and fall through.
@@ -871,7 +810,7 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF
}
case DataFormatJS: {
- DFG_ASSERT(m_jit.graph(), m_currentNode, !(type & SpecInt52));
+ RELEASE_ASSERT(!(type & SpecInt52));
// Check the value is an integer.
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
@@ -920,19 +859,61 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF
return gpr;
}
- case DataFormatJSDouble:
+ case DataFormatStrictInt52:
+ case DataFormatInt52: {
+ GPRReg gpr = info.gpr();
+ GPRReg result;
+ DataFormat oldFormat = info.registerFormat();
+ if (m_gprs.isLocked(gpr)) {
+ result = allocate();
+ m_jit.move(gpr, result);
+ } else {
+ lock(gpr);
+ info.fillInt32(*m_stream, gpr);
+ result = gpr;
+ }
+ RELEASE_ASSERT(!(type & ~SpecMachineInt));
+ if (oldFormat == DataFormatInt52)
+ m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), result);
+ if (type & SpecInt52) {
+ GPRReg temp = allocate();
+ m_jit.signExtend32ToPtr(result, temp);
+ // Currently, we can't supply value profiling information here. :-/
+ speculationCheck(
+ BadType, JSValueRegs(), 0,
+ m_jit.branch64(MacroAssembler::NotEqual, result, temp));
+ unlock(temp);
+ }
+ m_jit.zeroExtend32ToPtr(result, result);
+ returnFormat = DataFormatInt32;
+ return gpr;
+ }
+
+ case DataFormatDouble:
+ case DataFormatJSDouble: {
+ if (edge->hasConstant() && isInt32Constant(edge.node())) {
+ GPRReg gpr = allocate();
+ ASSERT(isInt32Constant(edge.node()));
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
+ returnFormat = DataFormatInt32;
+ return gpr;
+ }
+ FALLTHROUGH;
+ }
case DataFormatCell:
case DataFormatBoolean:
case DataFormatJSCell:
- case DataFormatJSBoolean:
- case DataFormatDouble:
+ case DataFormatJSBoolean: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ returnFormat = DataFormatInt32;
+ return allocate();
+ }
+
case DataFormatStorage:
- case DataFormatInt52:
- case DataFormatStrictInt52:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
+ RELEASE_ASSERT_NOT_REACHED();
default:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
+ RELEASE_ASSERT_NOT_REACHED();
return InvalidGPRReg;
}
}
@@ -949,7 +930,7 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
{
DataFormat mustBeDataFormatInt32;
GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
- DFG_ASSERT(m_jit.graph(), m_currentNode, mustBeDataFormatInt32 == DataFormatInt32);
+ RELEASE_ASSERT(mustBeDataFormatInt32 == DataFormatInt32);
return result;
}
@@ -957,22 +938,22 @@ GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat)
{
ASSERT(desiredFormat == DataFormatInt52 || desiredFormat == DataFormatStrictInt52);
AbstractValue& value = m_state.forNode(edge);
-
+ SpeculatedType type = value.m_type;
m_interpreter.filter(value, SpecMachineInt);
- if (value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
- return allocate();
- }
-
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
+ if ((edge->hasConstant() && !valueOfJSConstant(edge.node()).isMachineInt()) || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+ }
+
GPRReg gpr = allocate();
if (edge->hasConstant()) {
- JSValue jsValue = edge->asJSValue();
+ JSValue jsValue = valueOfJSConstant(edge.node());
ASSERT(jsValue.isMachineInt());
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
int64_t value = jsValue.asMachineInt();
@@ -985,21 +966,82 @@ GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat)
DataFormat spillFormat = info.spillFormat();
- DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52);
+ RELEASE_ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32 || spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
- if (desiredFormat == DataFormatStrictInt52) {
- if (spillFormat == DataFormatInt52)
- m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
- info.fillStrictInt52(*m_stream, gpr);
+ if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) {
+ // If we know this was spilled as an integer we can fill without checking.
+ m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.signExtend32ToPtr(gpr, gpr);
+ if (desiredFormat == DataFormatStrictInt52) {
+ info.fillStrictInt52(*m_stream, gpr);
+ return gpr;
+ }
+ m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
+ info.fillInt52(*m_stream, gpr);
return gpr;
}
- if (spillFormat == DataFormatStrictInt52)
- m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
- info.fillInt52(*m_stream, gpr);
- return gpr;
+ if (spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52) {
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
+ if (desiredFormat == DataFormatStrictInt52) {
+ if (spillFormat == DataFormatInt52)
+ m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
+ info.fillStrictInt52(*m_stream, gpr);
+ return gpr;
+ }
+ if (spillFormat == DataFormatStrictInt52)
+ m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
+ info.fillInt52(*m_stream, gpr);
+ return gpr;
+ }
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
+
+ // Fill as JSValue, and fall through.
+ info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
+ m_gprs.unlock(gpr);
+ FALLTHROUGH;
+ }
+
+ case DataFormatJS: {
+ // Check the value is an integer. Note that we would *like* to unbox an Int52
+ // at this point but this is too costly. We only *prove* that this is an Int52
+ // even though we check if it's an int32.
+ GPRReg gpr = info.gpr();
+ GPRReg result;
+ if (m_gprs.isLocked(gpr)) {
+ result = allocate();
+ m_jit.move(gpr, result);
+ } else {
+ m_gprs.lock(gpr);
+ result = gpr;
+ }
+ if (type & ~SpecInt32)
+ speculationCheck(BadType, JSValueRegs(result), edge, m_jit.branch64(MacroAssembler::Below, result, GPRInfo::tagTypeNumberRegister));
+ if (result == gpr) // The not-already-locked, so fill in-place, case.
+ info.fillInt52(*m_stream, gpr, desiredFormat);
+ m_jit.signExtend32ToPtr(result, result);
+ if (desiredFormat == DataFormatInt52)
+ m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), result);
+ return result;
+ }
+
+ case DataFormatInt32:
+ case DataFormatJSInt32: {
+ GPRReg gpr = info.gpr();
+ GPRReg result;
+ if (m_gprs.isLocked(gpr)) {
+ result = allocate();
+ m_jit.move(gpr, result);
+ } else {
+ m_gprs.lock(gpr);
+ info.fillInt52(*m_stream, gpr, desiredFormat);
+ result = gpr;
+ }
+ m_jit.signExtend32ToPtr(result, result);
+ if (desiredFormat == DataFormatInt52)
+ m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), result);
+ return result;
}
case DataFormatStrictInt52: {
@@ -1036,16 +1078,43 @@ GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat)
return gpr;
}
+ case DataFormatDouble:
+ case DataFormatJSDouble:
+ if (edge->hasConstant()) {
+ JSValue jsValue = valueOfJSConstant(edge.node());
+ if (jsValue.isMachineInt()) {
+ int64_t value = jsValue.asMachineInt();
+ if (desiredFormat == DataFormatInt52)
+ value = value << JSValue::int52ShiftAmount;
+ GPRReg gpr = allocate();
+ m_jit.move(MacroAssembler::Imm64(value), gpr);
+ return gpr;
+ }
+ }
+ FALLTHROUGH;
+ case DataFormatCell:
+ case DataFormatBoolean:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+ }
+
+ case DataFormatStorage:
+ RELEASE_ASSERT_NOT_REACHED();
+
default:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
+ RELEASE_ASSERT_NOT_REACHED();
return InvalidGPRReg;
}
}
FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
{
- ASSERT(edge.useKind() == DoubleRepUse || edge.useKind() == DoubleRepRealUse || edge.useKind() == DoubleRepMachineIntUse);
- ASSERT(edge->hasDoubleResult());
+ AbstractValue& value = m_state.forNode(edge);
+ SpeculatedType type = value.m_type;
+ ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecFullNumber));
+ m_interpreter.filter(value, SpecFullNumber);
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
@@ -1053,9 +1122,20 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
if (edge->hasConstant()) {
GPRReg gpr = allocate();
- if (edge->isNumberConstant()) {
+ if (isInt32Constant(edge.node())) {
FPRReg fpr = fprAllocate();
- m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(edge->asNumber())), gpr);
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(static_cast<double>(valueOfInt32Constant(edge.node())))), gpr);
+ m_jit.move64ToDouble(gpr, fpr);
+ unlock(gpr);
+
+ // Don't fill double here since that will lead to confusion: the
+ // register allocator will now think that this is a double while
+ // everyone else thinks it's an integer.
+ return fpr;
+ }
+ if (isNumberConstant(edge.node())) {
+ FPRReg fpr = fprAllocate();
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(edge.node()))), gpr);
m_jit.move64ToDouble(gpr, fpr);
unlock(gpr);
@@ -1068,24 +1148,159 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
}
DataFormat spillFormat = info.spillFormat();
- if (spillFormat != DataFormatDouble) {
- DFG_CRASH(
- m_jit.graph(), m_currentNode, toCString(
- "Expected ", edge, " to have double format but instead it is spilled as ",
- dataFormatToString(spillFormat)).data());
+ switch (spillFormat) {
+ case DataFormatDouble: {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(*m_stream, fpr);
+ return fpr;
+ }
+
+ case DataFormatInt32: {
+ GPRReg gpr = allocate();
+
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillInt32(*m_stream, gpr);
+ unlock(gpr);
+ break;
}
- DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatDouble);
+
+ case DataFormatInt52: {
+ GPRReg gpr = allocate();
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillInt52(*m_stream, gpr);
+ unlock(gpr);
+ break;
+ }
+
+ case DataFormatStrictInt52: {
+ GPRReg gpr = allocate();
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillStrictInt52(*m_stream, gpr);
+ unlock(gpr);
+ break;
+ }
+
+ default:
+ GPRReg gpr = allocate();
+
+ RELEASE_ASSERT(spillFormat & DataFormatJS);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillJSValue(*m_stream, gpr, spillFormat);
+ unlock(gpr);
+ break;
+ }
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: // Should have filled, above.
+ case DataFormatBoolean: // This type never occurs.
+ case DataFormatStorage:
+ RELEASE_ASSERT_NOT_REACHED();
+
+ case DataFormatCell:
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return fprAllocate();
+
+ case DataFormatJSCell:
+ case DataFormatJS:
+ case DataFormatJSBoolean: {
+ GPRReg jsValueGpr = info.gpr();
+ m_gprs.lock(jsValueGpr);
+ FPRReg fpr = fprAllocate();
+ GPRReg tempGpr = allocate();
+
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+
+ if (type & ~SpecFullNumber)
+ speculationCheck(BadType, JSValueRegs(jsValueGpr), edge, m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));
+
+ // First, if we get here we have a double encoded as a JSValue
+ m_jit.move(jsValueGpr, tempGpr);
+ unboxDouble(tempGpr, fpr);
+ JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
+
+ // Finally, handle integers.
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(jsValueGpr, fpr);
+ hasUnboxedDouble.link(&m_jit);
+
+ m_gprs.release(jsValueGpr);
+ m_gprs.unlock(jsValueGpr);
+ m_gprs.unlock(tempGpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(*m_stream, fpr);
+ info.killSpilled();
+ return fpr;
+ }
+
+ case DataFormatJSInt32:
+ case DataFormatInt32: {
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt32ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ case DataFormatInt52: {
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ GPRReg temp = allocate();
+ m_jit.move(gpr, temp);
+ m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), temp);
+ m_jit.convertInt64ToDouble(temp, fpr);
+ unlock(temp);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ case DataFormatStrictInt52: {
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt64ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ // Unbox the double
+ case DataFormatJSDouble: {
+ GPRReg gpr = info.gpr();
FPRReg fpr = fprAllocate();
- m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ if (m_gprs.isLocked(gpr)) {
+ // Make sure we don't trample gpr if it is in use.
+ GPRReg temp = allocate();
+ m_jit.move(gpr, temp);
+ unboxDouble(temp, fpr);
+ unlock(temp);
+ } else
+ unboxDouble(gpr, fpr);
+
+ m_gprs.release(gpr);
m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+
info.fillDouble(*m_stream, fpr);
return fpr;
}
- DFG_ASSERT(m_jit.graph(), m_currentNode, info.registerFormat() == DataFormatDouble);
- FPRReg fpr = info.fpr();
- m_fprs.lock(fpr);
- return fpr;
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return InvalidFPRReg;
+ }
}
GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
@@ -1093,34 +1308,37 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
AbstractValue& value = m_state.forNode(edge);
SpeculatedType type = value.m_type;
ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
-
m_interpreter.filter(value, SpecCell);
- if (value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
- return allocate();
- }
-
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
+ if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+ }
+
GPRReg gpr = allocate();
if (edge->hasConstant()) {
- JSValue jsValue = edge->asJSValue();
- m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
- info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
+ JSValue jsValue = valueOfJSConstant(edge.node());
+ if (jsValue.isCell()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
+ info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
+ return gpr;
+ }
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
return gpr;
}
-
+ RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
if (type & ~SpecCell)
- speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
+ speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
return gpr;
}
@@ -1130,8 +1348,8 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
if (!ASSERT_DISABLED) {
- MacroAssembler::Jump checkCell = m_jit.branchIfCell(JSValueRegs(gpr));
- m_jit.abortWithReason(DFGIsNotCell);
+ MacroAssembler::Jump checkCell = m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
+ m_jit.breakpoint();
checkCell.link(&m_jit);
}
return gpr;
@@ -1141,7 +1359,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
if (type & ~SpecCell)
- speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
+ speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
return gpr;
}
@@ -1149,16 +1367,20 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
case DataFormatJSInt32:
case DataFormatInt32:
case DataFormatJSDouble:
+ case DataFormatDouble:
case DataFormatJSBoolean:
case DataFormatBoolean:
- case DataFormatDouble:
- case DataFormatStorage:
case DataFormatInt52:
- case DataFormatStrictInt52:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
+ case DataFormatStrictInt52: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+ }
+
+ case DataFormatStorage:
+ RELEASE_ASSERT_NOT_REACHED();
default:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
+ RELEASE_ASSERT_NOT_REACHED();
return InvalidGPRReg;
}
}
@@ -1167,29 +1389,31 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
{
AbstractValue& value = m_state.forNode(edge);
SpeculatedType type = value.m_type;
- ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean));
-
m_interpreter.filter(value, SpecBoolean);
- if (value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
- return allocate();
- }
-
VirtualRegister virtualRegister = edge->virtualRegister();
GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
+ if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+ }
+
GPRReg gpr = allocate();
if (edge->hasConstant()) {
- JSValue jsValue = edge->asJSValue();
- m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
- info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
+ JSValue jsValue = valueOfJSConstant(edge.node());
+ if (jsValue.isBoolean()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
+ info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
+ return gpr;
+ }
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
return gpr;
}
- DFG_ASSERT(m_jit.graph(), m_currentNode, info.spillFormat() & DataFormatJS);
+ RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
@@ -1225,20 +1449,44 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
case DataFormatJSInt32:
case DataFormatInt32:
case DataFormatJSDouble:
+ case DataFormatDouble:
case DataFormatJSCell:
case DataFormatCell:
- case DataFormatDouble:
- case DataFormatStorage:
case DataFormatInt52:
- case DataFormatStrictInt52:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
+ case DataFormatStrictInt52: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ return allocate();
+ }
+
+ case DataFormatStorage:
+ RELEASE_ASSERT_NOT_REACHED();
default:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
+ RELEASE_ASSERT_NOT_REACHED();
return InvalidGPRReg;
}
}
+JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp)
+{
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);
+
+ JITCompiler::Jump notNumber = m_jit.branchTest64(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);
+
+ m_jit.move(value, tmp);
+ unboxDouble(tmp, result);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+
+ m_jit.convertInt32ToDouble(value, result);
+
+ done.link(&m_jit);
+
+ return notNumber;
+}
+
void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge)
{
#if ENABLE(GGC)
@@ -1268,24 +1516,41 @@ void SpeculativeJIT::compileObjectEquality(Node* node)
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
+ JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ GPRTemporary structure(this);
+ GPRReg structureGPR = structure.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+ m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
+ JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
@@ -1299,47 +1564,6 @@ void SpeculativeJIT::compileObjectEquality(Node* node)
jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}
-void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
-{
- SpeculateCellOperand op1(this, objectChild);
- JSValueOperand op2(this, otherChild);
- GPRTemporary result(this);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg op2GPR = op2.gpr();
- GPRReg resultGPR = result.gpr();
-
- DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
-
- // At this point we know that we can perform a straight-forward equality comparison on pointer
- // values because we are doing strict equality.
- m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
- m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
- jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
-}
-
-void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
-{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
-
- SpeculateCellOperand op1(this, objectChild);
- JSValueOperand op2(this, otherChild);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg op2GPR = op2.gpr();
-
- DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
-
- if (taken == nextBlock()) {
- branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
- jump(taken);
- } else {
- branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken);
- jump(notTaken);
- }
-}
-
void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
{
SpeculateCellOperand op1(this, leftChild);
@@ -1349,38 +1573,63 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
GPRReg op1GPR = op1.gpr();
GPRReg op2GPR = op2.gpr();
GPRReg resultGPR = result.gpr();
+ GPRTemporary structure;
+ GPRReg structureGPR = InvalidGPRReg;
bool masqueradesAsUndefinedWatchpointValid =
masqueradesAsUndefinedWatchpointIsStillValid();
+ if (!masqueradesAsUndefinedWatchpointValid) {
+ // The masquerades as undefined case will use the structure register, so allocate it here.
+ // Do this at the top of the function to avoid branching around a register allocation.
+ GPRTemporary realStructure(this);
+ structure.adopt(realStructure);
+ structureGPR = structure.gpr();
+ }
+
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
- MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));
+ MacroAssembler::Jump rightNotCell =
+ m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
// We know that within this branch, rightChild must be a cell.
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
+ JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
+ JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
@@ -1417,8 +1666,8 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
SpeculateCellOperand op1(this, leftChild);
JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
@@ -1427,38 +1676,63 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
GPRReg op1GPR = op1.gpr();
GPRReg op2GPR = op2.gpr();
GPRReg resultGPR = result.gpr();
+ GPRTemporary structure;
+ GPRReg structureGPR = InvalidGPRReg;
- bool masqueradesAsUndefinedWatchpointValid =
+ bool masqueradesAsUndefinedWatchpointValid =
masqueradesAsUndefinedWatchpointIsStillValid();
+ if (!masqueradesAsUndefinedWatchpointValid) {
+ // The masquerades as undefined case will use the structure register, so allocate it here.
+ // Do this at the top of the function to avoid branching around a register allocation.
+ GPRTemporary realStructure(this);
+ structure.adopt(realStructure);
+ structureGPR = structure.gpr();
+ }
+
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
- MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));
+ MacroAssembler::Jump rightNotCell =
+ m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
// We know that within this branch, rightChild must be a cell.
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
+ JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
+ JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
@@ -1515,8 +1789,8 @@ void SpeculativeJIT::compileInt52Compare(Node* node, MacroAssembler::RelationalC
void SpeculativeJIT::compilePeepHoleInt52Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+ BasicBlock* taken = branchNode->takenBlock();
+ BasicBlock* notTaken = branchNode->notTakenBlock();
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
@@ -1556,8 +1830,6 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
GPRReg resultGPR = result.gpr();
GPRTemporary structure;
GPRReg structureGPR = InvalidGPRReg;
- GPRTemporary scratch;
- GPRReg scratchGPR = InvalidGPRReg;
bool masqueradesAsUndefinedWatchpointValid =
masqueradesAsUndefinedWatchpointIsStillValid();
@@ -1566,33 +1838,37 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
// The masquerades as undefined case will use the structure register, so allocate it here.
// Do this at the top of the function to avoid branching around a register allocation.
GPRTemporary realStructure(this);
- GPRTemporary realScratch(this);
structure.adopt(realStructure);
- scratch.adopt(realScratch);
structureGPR = structure.gpr();
- scratchGPR = scratch.gpr();
}
- MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
+ MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
+ JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), structureGPR);
+
DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
+ JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
MacroAssembler::Jump isNotMasqueradesAsUndefined =
m_jit.branchTest8(
MacroAssembler::Zero,
- MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
- m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
m_jit.branchPtr(
MacroAssembler::Equal,
MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
+ MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
isNotMasqueradesAsUndefined.link(&m_jit);
}
@@ -1634,7 +1910,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
return;
}
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand value(this, node->child1());
FPRTemporary scratch(this);
GPRTemporary result(this);
@@ -1646,8 +1922,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
return;
}
- case BooleanUse:
- case KnownBooleanUse: {
+ case BooleanUse: {
if (!needsTypeCheck(node->child1(), SpecBoolean)) {
SpeculateBooleanOperand value(this, node->child1());
GPRTemporary result(this, Reuse, value);
@@ -1698,7 +1973,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
return compileStringZeroLength(node);
default:
- DFG_CRASH(m_jit.graph(), node, "Bad use kind");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
}
@@ -1707,36 +1982,32 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba
{
JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
GPRTemporary scratch(this);
- GPRTemporary structure;
GPRReg valueGPR = value.gpr();
GPRReg scratchGPR = scratch.gpr();
- GPRReg structureGPR = InvalidGPRReg;
-
- if (!masqueradesAsUndefinedWatchpointIsStillValid()) {
- GPRTemporary realStructure(this);
- structure.adopt(realStructure);
- structureGPR = structure.gpr();
- }
-
- MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
+
+ MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
+ JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
} else {
+ m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), scratchGPR);
+
DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
+ JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ scratchGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
- JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
- JITCompiler::Zero,
- MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
- TrustedImm32(MasqueradesAsUndefined));
+ JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::Zero, MacroAssembler::Address(scratchGPR, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
- m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
m_jit.branchPtr(
MacroAssembler::Equal,
- MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
+ MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
isNotMasqueradesAsUndefined.link(&m_jit);
}
@@ -1758,8 +2029,8 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba
void SpeculativeJIT::emitBranch(Node* node)
{
- BasicBlock* taken = node->branchData()->taken.block;
- BasicBlock* notTaken = node->branchData()->notTaken.block;
+ BasicBlock* taken = node->takenBlock();
+ BasicBlock* notTaken = node->notTakenBlock();
switch (node->child1().useKind()) {
case ObjectOrOtherUse: {
@@ -1768,7 +2039,7 @@ void SpeculativeJIT::emitBranch(Node* node)
}
case Int32Use:
- case DoubleRepUse: {
+ case NumberUse: {
if (node->child1().useKind() == Int32Use) {
bool invert = false;
@@ -1793,18 +2064,12 @@ void SpeculativeJIT::emitBranch(Node* node)
return;
}
- case StringUse: {
- emitStringBranch(node->child1(), taken, notTaken);
- return;
- }
-
case UntypedUse:
- case BooleanUse:
- case KnownBooleanUse: {
+ case BooleanUse: {
JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
GPRReg valueGPR = value.gpr();
- if (node->child1().useKind() == BooleanUse || node->child1().useKind() == KnownBooleanUse) {
+ if (node->child1().useKind() == BooleanUse) {
if (!needsTypeCheck(node->child1(), SpecBoolean)) {
MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
@@ -1853,7 +2118,7 @@ void SpeculativeJIT::emitBranch(Node* node)
}
default:
- DFG_CRASH(m_jit.graph(), m_currentNode, "Bad use kind");
+ RELEASE_ASSERT_NOT_REACHED();
}
}
@@ -1867,50 +2132,40 @@ void SpeculativeJIT::compile(Node* node)
switch (op) {
case JSConstant:
- case DoubleConstant:
- case Int52Constant:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
initConstantInfo(node);
break;
+ case PhantomArguments:
+ initConstantInfo(node);
+ break;
+
+ case WeakJSConstant:
+ m_jit.addWeakReference(node->weakConstant());
+ initConstantInfo(node);
+ break;
+
case Identity: {
- speculate(node, node->child1());
- switch (node->child1().useKind()) {
- case DoubleRepUse:
- case DoubleRepRealUse:
- case DoubleRepMachineIntUse: {
- SpeculateDoubleOperand op(this, node->child1());
- FPRTemporary scratch(this, op);
- m_jit.moveDouble(op.fpr(), scratch.fpr());
- doubleResult(scratch.fpr(), node);
- break;
- }
- case Int52RepUse: {
- SpeculateInt52Operand op(this, node->child1());
- GPRTemporary result(this, Reuse, op);
- m_jit.move(op.gpr(), result.gpr());
- int52Result(result.gpr(), node);
- break;
- }
- default: {
- JSValueOperand op(this, node->child1());
- GPRTemporary result(this, Reuse, op);
- m_jit.move(op.gpr(), result.gpr());
- jsValueResult(result.gpr(), node);
- break;
- }
- } // switch
+ // CSE should always eliminate this.
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
case GetLocal: {
+ SpeculatedType prediction = node->variableAccessData()->prediction();
AbstractValue& value = m_state.variables().operand(node->local());
+ // If we have no prediction for this local, then don't attempt to compile.
+ if (prediction == SpecNone) {
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
+ break;
+ }
+
// If the CFA is tracking this variable and it found that the variable
// cannot have been assigned, then don't attempt to proceed.
if (value.isClear()) {
- m_compileOkay = false;
+ // FIXME: We should trap instead.
+ // https://bugs.webkit.org/show_bug.cgi?id=110383
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
break;
}
@@ -1978,18 +2233,13 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case MovHint: {
- compileMovHint(m_currentNode);
- noResult(node);
- break;
- }
-
- case ZombieHint: {
- recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
- noResult(node);
+ case MovHint:
+ case ZombieHint:
+ case Check: {
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
-
+
case SetLocal: {
switch (node->variableAccessData()->flushFormat()) {
case FlushedDouble: {
@@ -2036,7 +2286,8 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case FlushedJSValue: {
+ case FlushedJSValue:
+ case FlushedArguments: {
JSValueOperand value(this, node->child1());
m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal()));
noResult(node);
@@ -2045,7 +2296,7 @@ void SpeculativeJIT::compile(Node* node)
}
default:
- DFG_CRASH(m_jit.graph(), node, "Bad flush format");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
@@ -2057,24 +2308,23 @@ void SpeculativeJIT::compile(Node* node)
// But it may be profitable to use this as a hook to run speculation checks
// on arguments, thereby allowing us to trivially eliminate such checks if
// the argument is not used.
- recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
break;
case BitAnd:
case BitOr:
case BitXor:
- if (node->child1()->isInt32Constant()) {
+ if (isInt32Constant(node->child1().node())) {
SpeculateInt32Operand op2(this, node->child2());
GPRTemporary result(this, Reuse, op2);
- bitOp(op, node->child1()->asInt32(), op2.gpr(), result.gpr());
+ bitOp(op, valueOfInt32Constant(node->child1().node()), op2.gpr(), result.gpr());
int32Result(result.gpr(), node);
- } else if (node->child2()->isInt32Constant()) {
+ } else if (isInt32Constant(node->child2().node())) {
SpeculateInt32Operand op1(this, node->child1());
GPRTemporary result(this, Reuse, op1);
- bitOp(op, node->child2()->asInt32(), op1.gpr(), result.gpr());
+ bitOp(op, valueOfInt32Constant(node->child2().node()), op1.gpr(), result.gpr());
int32Result(result.gpr(), node);
} else {
@@ -2093,11 +2343,11 @@ void SpeculativeJIT::compile(Node* node)
case BitRShift:
case BitLShift:
case BitURShift:
- if (node->child2()->isInt32Constant()) {
+ if (isInt32Constant(node->child2().node())) {
SpeculateInt32Operand op1(this, node->child1());
GPRTemporary result(this, Reuse, op1);
- shiftOp(op, op1.gpr(), node->child2()->asInt32() & 0x1f, result.gpr());
+ shiftOp(op, op1.gpr(), valueOfInt32Constant(node->child2().node()) & 0x1f, result.gpr());
int32Result(result.gpr(), node);
} else {
@@ -2129,62 +2379,24 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case DoubleRep: {
- compileDoubleRep(node);
+ case Int32ToDouble: {
+ compileInt32ToDouble(node);
break;
}
- case ValueRep: {
- compileValueRep(node);
+ case Int52ToValue: {
+ JSValueOperand operand(this, node->child1());
+ GPRTemporary result(this, Reuse, operand);
+ m_jit.move(operand.gpr(), result.gpr());
+ jsValueResult(result.gpr(), node);
break;
}
- case Int52Rep: {
- switch (node->child1().useKind()) {
- case Int32Use: {
- SpeculateInt32Operand operand(this, node->child1());
- GPRTemporary result(this, Reuse, operand);
-
- m_jit.signExtend32ToPtr(operand.gpr(), result.gpr());
-
- strictInt52Result(result.gpr(), node);
- break;
- }
-
- case MachineIntUse: {
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
-
- convertMachineInt(node->child1(), resultGPR);
-
- strictInt52Result(resultGPR, node);
- break;
- }
-
- case DoubleRepMachineIntUse: {
- SpeculateDoubleOperand value(this, node->child1());
- FPRReg valueFPR = value.fpr();
-
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
-
- callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR);
-
- DFG_TYPE_CHECK(
- JSValueRegs(), node->child1(), SpecInt52AsDouble,
- m_jit.branch64(
- JITCompiler::Equal, resultGPR,
- JITCompiler::TrustedImm64(JSValue::notInt52)));
-
- strictInt52Result(resultGPR, node);
- break;
- }
-
- default:
- DFG_CRASH(m_jit.graph(), node, "Bad use kind");
- }
+ case Int52ToDouble: {
+ SpeculateDoubleOperand operand(this, node->child1());
+ FPRTemporary result(this, operand);
+ m_jit.moveDouble(operand.fpr(), result.fpr());
+ doubleResult(result.fpr(), node);
break;
}
@@ -2197,7 +2409,7 @@ void SpeculativeJIT::compile(Node* node)
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node()))
callOperation(operationValueAddNotNumber, result.gpr(), op1GPR, op2GPR);
else
@@ -2210,10 +2422,6 @@ void SpeculativeJIT::compile(Node* node)
case ArithAdd:
compileAdd(node);
break;
-
- case ArithClz32:
- compileArithClz32(node);
- break;
case MakeRope:
compileMakeRope(node);
@@ -2257,7 +2465,7 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
FPRTemporary result(this);
@@ -2267,7 +2475,7 @@ void SpeculativeJIT::compile(Node* node)
}
default:
- DFG_CRASH(m_jit.graph(), node, "Bad use kind");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
break;
@@ -2295,7 +2503,7 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case DoubleRepUse: {
+ case NumberUse: {
SpeculateDoubleOperand op1(this, node->child1());
SpeculateDoubleOperand op2(this, node->child2());
FPRTemporary result(this, op1);
@@ -2334,35 +2542,22 @@ void SpeculativeJIT::compile(Node* node)
}
default:
- DFG_CRASH(m_jit.graph(), node, "Bad use kind");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
break;
}
-
- case ArithPow:
- compileArithPow(node);
- break;
-
- case ArithSqrt:
- compileArithSqrt(node);
- break;
-
- case ArithFRound: {
+
+ case ArithSqrt: {
SpeculateDoubleOperand op1(this, node->child1());
FPRTemporary result(this, op1);
- m_jit.convertDoubleToFloat(op1.fpr(), result.fpr());
- m_jit.convertFloatToDouble(result.fpr(), result.fpr());
+ m_jit.sqrtDouble(op1.fpr(), result.fpr());
doubleResult(result.fpr(), node);
break;
}
-
- case ArithRound:
- compileArithRound(node);
- break;
-
+
case ArithSin: {
SpeculateDoubleOperand op1(this, node->child1());
FPRReg op1FPR = op1.fpr();
@@ -2387,10 +2582,6 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case ArithLog:
- compileArithLog(node);
- break;
-
case LogicalNot:
compileLogicalNot(node);
break;
@@ -2416,7 +2607,7 @@ void SpeculativeJIT::compile(Node* node)
break;
case CompareEqConstant:
- ASSERT(node->child2()->asJSValue().isNull());
+ ASSERT(isNullConstant(node->child2().node()));
if (nonSpeculativeCompareNull(node, node->child1()))
return;
break;
@@ -2426,6 +2617,11 @@ void SpeculativeJIT::compile(Node* node)
return;
break;
+ case CompareStrictEqConstant:
+ if (compileStrictEqForConstant(node, node->child1(), valueOfJSConstant(node->child2().node())))
+ return;
+ break;
+
case CompareStrictEq:
if (compileStrictEq(node))
return;
@@ -2462,24 +2658,9 @@ void SpeculativeJIT::compile(Node* node)
switch (node->arrayMode().type()) {
case Array::SelectUsingPredictions:
case Array::ForceExit:
- DFG_CRASH(m_jit.graph(), node, "Bad array mode type");
- break;
- case Array::Undecided: {
- SpeculateStrictInt32Operand index(this, node->child2());
- GPRTemporary result(this, Reuse, index);
- GPRReg indexGPR = index.gpr();
- GPRReg resultGPR = result.gpr();
-
- use(node->child1());
- index.use();
-
- speculationCheck(OutOfBounds, JSValueRegs(), node,
- m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0)));
-
- m_jit.move(MacroAssembler::TrustedImm64(ValueUndefined), resultGPR);
- jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
+ RELEASE_ASSERT_NOT_REACHED();
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
break;
- }
case Array::Generic: {
JSValueOperand base(this, node->child1());
JSValueOperand property(this, node->child2());
@@ -2487,7 +2668,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg propertyGPR = property.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR);
jsValueResult(result.gpr(), node);
@@ -2509,17 +2690,7 @@ void SpeculativeJIT::compile(Node* node)
GPRTemporary result(this);
m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
- if (node->arrayMode().isSaneChain()) {
- ASSERT(node->arrayMode().type() == Array::Contiguous);
- JITCompiler::Jump notHole = m_jit.branchTest64(
- MacroAssembler::NonZero, result.gpr());
- m_jit.move(TrustedImm64(JSValue::encode(jsUndefined())), result.gpr());
- notHole.link(&m_jit);
- } else {
- speculationCheck(
- LoadFromHole, JSValueRegs(), 0,
- m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
- }
+ speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
jsValueResult(result.gpr(), node, node->arrayMode().type() == Array::Int32 ? DataFormatJSInt32 : DataFormatJS);
break;
}
@@ -2662,11 +2833,8 @@ void SpeculativeJIT::compile(Node* node)
case Array::String:
compileGetByValOnString(node);
break;
- case Array::DirectArguments:
- compileGetByValOnDirectArguments(node);
- break;
- case Array::ScopedArguments:
- compileGetByValOnScopedArguments(node);
+ case Array::Arguments:
+ compileGetByValOnArguments(node);
break;
default: {
TypedArrayType type = node->arrayMode().typedArrayType();
@@ -2692,10 +2860,12 @@ void SpeculativeJIT::compile(Node* node)
switch (arrayMode.type()) {
case Array::SelectUsingPredictions:
case Array::ForceExit:
- DFG_CRASH(m_jit.graph(), node, "Bad array mode type");
+ RELEASE_ASSERT_NOT_REACHED();
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
+ alreadyHandled = true;
break;
case Array::Generic: {
- DFG_ASSERT(m_jit.graph(), node, node->op() == PutByVal || node->op() == PutByValDirect);
+ RELEASE_ASSERT(node->op() == PutByVal);
JSValueOperand arg1(this, child1);
JSValueOperand arg2(this, child2);
@@ -2705,9 +2875,9 @@ void SpeculativeJIT::compile(Node* node)
GPRReg arg3GPR = arg3.gpr();
flushRegisters();
if (node->op() == PutByValDirect)
- callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR);
+ callOperation(m_jit.isStrictModeFor(node->codeOrigin) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR);
else
- callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
+ callOperation(m_jit.isStrictModeFor(node->codeOrigin) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
noResult(node);
alreadyHandled = true;
@@ -2896,6 +3066,47 @@ void SpeculativeJIT::compile(Node* node)
break;
}
+ case Array::Arguments: {
+ JSValueOperand value(this, child3);
+ GPRTemporary scratch(this);
+ GPRTemporary scratch2(this);
+
+ GPRReg valueReg = value.gpr();
+ GPRReg scratchReg = scratch.gpr();
+ GPRReg scratch2Reg = scratch2.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ // Two really lame checks.
+ speculationCheck(
+ Uncountable, JSValueSource(), 0,
+ m_jit.branch32(
+ MacroAssembler::AboveOrEqual, propertyReg,
+ MacroAssembler::Address(baseReg, Arguments::offsetOfNumArguments())));
+ speculationCheck(
+ Uncountable, JSValueSource(), 0,
+ m_jit.branchTestPtr(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(
+ baseReg, Arguments::offsetOfSlowArgumentData())));
+
+ m_jit.move(propertyReg, scratch2Reg);
+ m_jit.signExtend32ToPtr(scratch2Reg, scratch2Reg);
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, Arguments::offsetOfRegisters()),
+ scratchReg);
+
+ m_jit.store64(
+ valueReg,
+ MacroAssembler::BaseIndex(
+ scratchReg, scratch2Reg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register)));
+
+ noResult(node);
+ break;
+ }
+
default: {
TypedArrayType type = arrayMode.typedArrayType();
if (isInt(type))
@@ -2917,7 +3128,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
// Must use jsValueResult because otherwise we screw up register
@@ -2932,7 +3143,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationRegExpExec, result.gpr(), baseGPR, argumentGPR);
jsValueResult(result.gpr(), node);
@@ -2946,7 +3157,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
// If we add a DataFormatBool, we should use it here.
@@ -2989,7 +3200,7 @@ void SpeculativeJIT::compile(Node* node)
addSlowPathGenerator(
slowPathCall(
- slowPath, this, operationArrayPush, storageLengthGPR,
+ slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
valueGPR, baseGPR));
jsValueResult(storageLengthGPR, node);
@@ -3001,7 +3212,7 @@ void SpeculativeJIT::compile(Node* node)
FPRReg valueFPR = value.fpr();
DFG_TYPE_CHECK(
- JSValueRegs(), node->child2(), SpecDoubleReal,
+ JSValueRegs(), node->child2(), SpecFullRealNumber,
m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR));
m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
@@ -3013,7 +3224,7 @@ void SpeculativeJIT::compile(Node* node)
addSlowPathGenerator(
slowPathCall(
- slowPath, this, operationArrayPushDouble, storageLengthGPR,
+ slowPath, this, operationArrayPushDouble, NoResult, storageLengthGPR,
valueFPR, baseGPR));
jsValueResult(storageLengthGPR, node);
@@ -3088,7 +3299,7 @@ void SpeculativeJIT::compile(Node* node)
// FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
// length and the new length.
m_jit.store64(
- MacroAssembler::TrustedImm64(bitwise_cast<int64_t>(PNaN)), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
+ MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR);
boxDouble(tempFPR, valueGPR);
} else {
@@ -3155,7 +3366,7 @@ void SpeculativeJIT::compile(Node* node)
}
case DFG::Jump: {
- jump(node->targetBlock());
+ jump(node->takenBlock());
noResult(node);
break;
}
@@ -3177,7 +3388,12 @@ void SpeculativeJIT::compile(Node* node)
JSValueOperand op1(this, node->child1());
m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);
- m_jit.emitFunctionEpilogue();
+ // Grab the return address.
+ m_jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::regT1);
+ // Restore our caller's "r".
+ m_jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister);
+ // Return.
+ m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
m_jit.ret();
noResult(node);
@@ -3192,56 +3408,8 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case BooleanToNumber: {
- switch (node->child1().useKind()) {
- case BooleanUse: {
- JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
- GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
-
- m_jit.move(value.gpr(), result.gpr());
- m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
- DFG_TYPE_CHECK(
- JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
- JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
-
- int32Result(result.gpr(), node);
- break;
- }
-
- case UntypedUse: {
- JSValueOperand value(this, node->child1());
- GPRTemporary result(this);
-
- if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) {
- m_jit.move(value.gpr(), result.gpr());
- m_jit.and32(TrustedImm32(1), result.gpr());
- int32Result(result.gpr(), node);
- break;
- }
-
- m_jit.move(value.gpr(), result.gpr());
- m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
- JITCompiler::Jump isBoolean = m_jit.branchTest64(
- JITCompiler::Zero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1)));
- m_jit.move(value.gpr(), result.gpr());
- JITCompiler::Jump done = m_jit.jump();
- isBoolean.link(&m_jit);
- m_jit.or64(GPRInfo::tagTypeNumberRegister, result.gpr());
- done.link(&m_jit);
-
- jsValueResult(result.gpr(), node);
- break;
- }
-
- default:
- DFG_CRASH(m_jit.graph(), node, "Bad use kind");
- break;
- }
- break;
- }
-
case ToPrimitive: {
- DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse);
+ RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
JSValueOperand op1(this, node->child1());
GPRTemporary result(this, Reuse, op1);
@@ -3250,52 +3418,54 @@ void SpeculativeJIT::compile(Node* node)
op1.use();
- MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(JSValueRegs(op1GPR));
- MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(op1GPR);
-
- alreadyPrimitive.link(&m_jit);
- m_jit.move(op1GPR, resultGPR);
-
- addSlowPathGenerator(
- slowPathCall(notPrimitive, this, operationToPrimitive, resultGPR, op1GPR));
+ if (!(m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean)))
+ m_jit.move(op1GPR, resultGPR);
+ else {
+ MacroAssembler::Jump alreadyPrimitive = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()));
+
+ alreadyPrimitive.link(&m_jit);
+ m_jit.move(op1GPR, resultGPR);
+
+ addSlowPathGenerator(
+ slowPathCall(notPrimitive, this, operationToPrimitive, resultGPR, op1GPR));
+ }
jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
break;
}
- case ToString:
- case CallStringConstructor: {
+ case ToString: {
if (node->child1().useKind() == UntypedUse) {
JSValueOperand op1(this, node->child1());
GPRReg op1GPR = op1.gpr();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
flushRegisters();
JITCompiler::Jump done;
if (node->child1()->prediction() & SpecString) {
- JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(JSValueRegs(op1GPR));
- JITCompiler::Jump slowPath2 = m_jit.branchIfNotString(op1GPR);
+ JITCompiler::Jump slowPath1 = m_jit.branchTest64(
+ JITCompiler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump slowPath2 = m_jit.branchPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(op1GPR, JSCell::structureOffset()),
+ TrustedImmPtr(m_jit.vm()->stringStructure.get()));
m_jit.move(op1GPR, resultGPR);
done = m_jit.jump();
slowPath1.link(&m_jit);
slowPath2.link(&m_jit);
}
- if (op == ToString)
- callOperation(operationToString, resultGPR, op1GPR);
- else {
- ASSERT(op == CallStringConstructor);
- callOperation(operationCallStringConstructor, resultGPR, op1GPR);
- }
+ callOperation(operationToString, resultGPR, op1GPR);
if (done.isSet())
done.link(&m_jit);
cellResult(resultGPR, node);
break;
}
- compileToStringOrCallStringConstructorOnCell(node);
+ compileToStringOnCell(node);
break;
}
@@ -3305,10 +3475,10 @@ void SpeculativeJIT::compile(Node* node)
}
case NewArray: {
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
- if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
+ if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) {
Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
- DFG_ASSERT(m_jit.graph(), node, structure->indexingType() == node->indexingType());
+ RELEASE_ASSERT(structure->indexingType() == node->indexingType());
ASSERT(
hasUndecided(structure->indexingType())
|| hasInt32(structure->indexingType())
@@ -3341,7 +3511,7 @@ void SpeculativeJIT::compile(Node* node)
SpeculateDoubleOperand operand(this, use);
FPRReg opFPR = operand.fpr();
DFG_TYPE_CHECK(
- JSValueRegs(), use, SpecDoubleReal,
+ JSValueRegs(), use, SpecFullRealNumber,
m_jit.branchDouble(
MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx));
@@ -3379,7 +3549,7 @@ void SpeculativeJIT::compile(Node* node)
if (!node->numChildren()) {
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
cellResult(result.gpr(), node);
break;
@@ -3406,7 +3576,7 @@ void SpeculativeJIT::compile(Node* node)
FPRReg opFPR = operand.fpr();
GPRReg scratchGPR = scratch.gpr();
DFG_TYPE_CHECK(
- JSValueRegs(), use, SpecDoubleReal,
+ JSValueRegs(), use, SpecFullRealNumber,
m_jit.branchDouble(
MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
m_jit.boxDouble(opFPR, scratchGPR);
@@ -3458,7 +3628,7 @@ void SpeculativeJIT::compile(Node* node)
m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr());
}
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(
operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
@@ -3476,8 +3646,8 @@ void SpeculativeJIT::compile(Node* node)
}
case NewArrayWithSize: {
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
- if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
+ if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) {
SpeculateStrictInt32Operand size(this, node->child1());
GPRTemporary result(this);
GPRTemporary storage(this);
@@ -3491,7 +3661,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg scratch2GPR = scratch2.gpr();
MacroAssembler::JumpList slowCases;
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)));
ASSERT((1 << 3) == sizeof(JSValue));
m_jit.move(sizeGPR, scratchGPR);
@@ -3501,13 +3671,13 @@ void SpeculativeJIT::compile(Node* node)
emitAllocateBasicStorage(resultGPR, storageGPR));
m_jit.subPtr(scratchGPR, storageGPR);
Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
- emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
+ emitAllocateJSObject<JSArray>(resultGPR, ImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
if (hasDouble(node->indexingType())) {
- m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
+ m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
m_jit.move(sizeGPR, scratch2GPR);
MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratch2GPR);
MacroAssembler::Label loop = m_jit.label();
@@ -3517,11 +3687,12 @@ void SpeculativeJIT::compile(Node* node)
done.link(&m_jit);
}
- addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
+ addSlowPathGenerator(adoptPtr(
+ new CallArrayAllocatorWithVariableSizeSlowPathGenerator(
slowCases, this, operationNewArrayWithSize, resultGPR,
globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage),
- sizeGPR));
+ sizeGPR)));
cellResult(resultGPR, node);
break;
@@ -3530,10 +3701,10 @@ void SpeculativeJIT::compile(Node* node)
SpeculateStrictInt32Operand size(this, node->child1());
GPRReg sizeGPR = size.gpr();
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
GPRReg structureGPR = selectScratchGPR(sizeGPR);
- MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
+ MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX));
m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR);
MacroAssembler::Jump done = m_jit.jump();
bigLength.link(&m_jit);
@@ -3545,9 +3716,9 @@ void SpeculativeJIT::compile(Node* node)
}
case NewArrayBuffer: {
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
IndexingType indexingType = node->indexingType();
- if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingType)) {
+ if (!globalObject->isHavingABadTime() && !hasArrayStorage(indexingType)) {
unsigned numElements = node->numConstants();
GPRTemporary result(this);
@@ -3558,7 +3729,7 @@ void SpeculativeJIT::compile(Node* node)
emitAllocateJSArray(resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), storageGPR, numElements);
- DFG_ASSERT(m_jit.graph(), node, indexingType & IsArray);
+ RELEASE_ASSERT(indexingType & IsArray);
JSValue* data = m_jit.codeBlock()->constantBuffer(node->startConstant());
if (indexingType == ArrayWithDouble) {
for (unsigned index = 0; index < node->numConstants(); ++index) {
@@ -3580,7 +3751,7 @@ void SpeculativeJIT::compile(Node* node)
}
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants());
@@ -3599,10 +3770,10 @@ void SpeculativeJIT::compile(Node* node)
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
- JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
callOperation(
operationNewTypedArrayWithOneArgumentForType(node->typedArrayType()),
resultGPR, globalObject->typedArrayStructure(node->typedArrayType()),
@@ -3612,7 +3783,7 @@ void SpeculativeJIT::compile(Node* node)
break;
}
default:
- DFG_CRASH(m_jit.graph(), node, "Bad use kind");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
break;
@@ -3620,7 +3791,7 @@ void SpeculativeJIT::compile(Node* node)
case NewRegexp: {
flushRegisters();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
callOperation(operationNewRegexp, result.gpr(), m_jit.codeBlock()->regexp(node->regexpIndex()));
@@ -3636,14 +3807,17 @@ void SpeculativeJIT::compile(Node* node)
GPRReg tempGPR = temp.gpr();
MacroAssembler::JumpList slowCases;
- slowCases.append(m_jit.branchIfNotCell(JSValueRegs(thisValueGPR)));
+ slowCases.append(m_jit.branchTest64(
+ MacroAssembler::NonZero, thisValueGPR, GPRInfo::tagMaskRegister));
+ m_jit.loadPtr(
+ MacroAssembler::Address(thisValueGPR, JSCell::structureOffset()), tempGPR);
slowCases.append(m_jit.branch8(
MacroAssembler::NotEqual,
- MacroAssembler::Address(thisValueGPR, JSCell::typeInfoTypeOffset()),
+ MacroAssembler::Address(tempGPR, Structure::typeInfoTypeOffset()),
TrustedImm32(FinalObjectType)));
m_jit.move(thisValueGPR, tempGPR);
J_JITOperation_EJ function;
- if (m_jit.graph().executableFor(node->origin.semantic)->isStrictMode())
+ if (m_jit.graph().executableFor(node->codeOrigin)->isStrictMode())
function = operationToThisStrict;
else
function = operationToThis;
@@ -3672,16 +3846,11 @@ void SpeculativeJIT::compile(Node* node)
GPRReg allocatorGPR = allocator.gpr();
GPRReg structureGPR = structure.gpr();
GPRReg scratchGPR = scratch.gpr();
- // Rare data is only used to access the allocator & structure
- // We can avoid using an additional GPR this way
- GPRReg rareDataGPR = structureGPR;
MacroAssembler::JumpList slowPath;
- m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR);
- slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR));
- m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
- m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR);
+ m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
+ m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR);
slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR));
emitAllocateJSObject(resultGPR, allocatorGPR, structureGPR, TrustedImmPtr(0), scratchGPR, slowPath);
@@ -3691,6 +3860,12 @@ void SpeculativeJIT::compile(Node* node)
break;
}
+ case AllocationProfileWatchpoint:
+ case TypedArrayWatchpoint: {
+ noResult(node);
+ break;
+ }
+
case NewObject: {
GPRTemporary result(this);
GPRTemporary allocator(this);
@@ -3722,39 +3897,85 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case GetArgumentCount: {
+ case GetScope: {
+ SpeculateCellOperand function(this, node->child1());
+ GPRTemporary result(this, Reuse, function);
+ m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr());
+ cellResult(result.gpr(), node);
+ break;
+ }
+
+ case GetMyScope: {
GPRTemporary result(this);
- m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), result.gpr());
- int32Result(result.gpr(), node);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::addressFor(JSStack::ScopeChain), resultGPR);
+ cellResult(resultGPR, node);
break;
}
- case GetScope:
- compileGetScope(node);
+ case SkipTopScope: {
+ SpeculateCellOperand scope(this, node->child1());
+ GPRTemporary result(this, Reuse, scope);
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(scope.gpr(), resultGPR);
+ JITCompiler::Jump activationNotCreated =
+ m_jit.branchTest64(
+ JITCompiler::Zero,
+ JITCompiler::addressFor(
+ static_cast<VirtualRegister>(m_jit.graph().machineActivationRegister())));
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, JSScope::offsetOfNext()), resultGPR);
+ activationNotCreated.link(&m_jit);
+ cellResult(resultGPR, node);
break;
+ }
- case SkipScope:
- compileSkipScope(node);
+ case SkipScope: {
+ SpeculateCellOperand scope(this, node->child1());
+ GPRTemporary result(this, Reuse, scope);
+ m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr());
+ cellResult(result.gpr(), node);
break;
+ }
+
+ case GetClosureRegisters: {
+ if (WriteBarrierBase<Unknown>* registers = m_jit.graph().tryGetRegisters(node->child1().node())) {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(TrustedImmPtr(registers), resultGPR);
+ storageResult(resultGPR, node);
+ break;
+ }
+ SpeculateCellOperand scope(this, node->child1());
+ GPRTemporary result(this);
+ GPRReg scopeGPR = scope.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::Address(scopeGPR, JSVariableObject::offsetOfRegisters()), resultGPR);
+ storageResult(resultGPR, node);
+ break;
+ }
case GetClosureVar: {
- SpeculateCellOperand base(this, node->child1());
+ StorageOperand registers(this, node->child1());
GPRTemporary result(this);
- GPRReg baseGPR = base.gpr();
+ GPRReg registersGPR = registers.gpr();
GPRReg resultGPR = result.gpr();
- m_jit.load64(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset())), resultGPR);
+ m_jit.load64(JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register)), resultGPR);
jsValueResult(resultGPR, node);
break;
}
case PutClosureVar: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
+ StorageOperand registers(this, node->child2());
+ JSValueOperand value(this, node->child3());
- GPRReg baseGPR = base.gpr();
+ GPRReg registersGPR = registers.gpr();
GPRReg valueGPR = value.gpr();
- m_jit.store64(valueGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset())));
+ speculate(node, node->child1());
+
+ m_jit.store64(valueGPR, JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register)));
noResult(node);
break;
}
@@ -3771,7 +3992,7 @@ void SpeculativeJIT::compile(Node* node)
base.use();
- cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber());
+ cachedGetById(node->codeOrigin, baseGPR, resultGPR, node->identifierNumber());
jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
break;
@@ -3786,16 +4007,16 @@ void SpeculativeJIT::compile(Node* node)
base.use();
- JITCompiler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(baseGPR));
+ JITCompiler::Jump notCell = m_jit.branchTest64(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
- cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), notCell);
+ cachedGetById(node->codeOrigin, baseGPR, resultGPR, node->identifierNumber(), notCell);
jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
break;
}
default:
- DFG_CRASH(m_jit.graph(), node, "Bad use kind");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
break;
@@ -3812,7 +4033,7 @@ void SpeculativeJIT::compile(Node* node)
SpeculateCellOperand base(this, node->child1());
GPRReg baseGPR = base.gpr();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
@@ -3820,7 +4041,7 @@ void SpeculativeJIT::compile(Node* node)
flushRegisters();
- cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill);
+ cachedGetById(node->codeOrigin, baseGPR, resultGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill);
jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
break;
@@ -3830,22 +4051,22 @@ void SpeculativeJIT::compile(Node* node)
JSValueOperand base(this, node->child1());
GPRReg baseGPR = base.gpr();
- GPRFlushedCallResult result(this);
+ GPRResult result(this);
GPRReg resultGPR = result.gpr();
base.use();
flushRegisters();
- JITCompiler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(baseGPR));
+ JITCompiler::Jump notCell = m_jit.branchTest64(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
- cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), notCell, DontSpill);
+ cachedGetById(node->codeOrigin, baseGPR, resultGPR, node->identifierNumber(), notCell, DontSpill);
jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
break;
}
default:
- DFG_CRASH(m_jit.graph(), node, "Bad use kind");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
break;
@@ -3855,33 +4076,17 @@ void SpeculativeJIT::compile(Node* node)
compileGetArrayLength(node);
break;
- case CheckCell: {
- SpeculateCellOperand cell(this, node->child1());
- speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->cell()));
- noResult(node);
- break;
- }
-
- case CheckNotEmpty: {
- JSValueOperand operand(this, node->child1());
- GPRReg gpr = operand.gpr();
- speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branchTest64(JITCompiler::Zero, gpr));
+ case CheckFunction: {
+ SpeculateCellOperand function(this, node->child1());
+ speculationCheck(BadFunction, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node->function()));
noResult(node);
break;
}
-
- case CheckIdent:
- compileCheckIdent(node);
- break;
-
- case GetExecutable: {
+
+ case CheckExecutable: {
SpeculateCellOperand function(this, node->child1());
- GPRTemporary result(this, Reuse, function);
- GPRReg functionGPR = function.gpr();
- GPRReg resultGPR = result.gpr();
- speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType);
- m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR);
- cellResult(resultGPR, node);
+ speculationCheck(BadExecutable, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(function.gpr(), JSFunction::offsetOfExecutable()), node->executable()));
+ noResult(node);
break;
}
@@ -3891,28 +4096,32 @@ void SpeculativeJIT::compile(Node* node)
ASSERT(node->structureSet().size());
ExitKind exitKind;
- if (node->child1()->hasConstant())
- exitKind = BadConstantCache;
+ if (node->child1()->op() == WeakJSConstant)
+ exitKind = BadWeakConstantCache;
else
exitKind = BadCache;
if (node->structureSet().size() == 1) {
speculationCheck(
exitKind, JSValueSource::unboxedCell(base.gpr()), 0,
- m_jit.branchWeakStructure(
+ m_jit.branchWeakPtr(
JITCompiler::NotEqual,
- JITCompiler::Address(base.gpr(), JSCell::structureIDOffset()),
+ JITCompiler::Address(base.gpr(), JSCell::structureOffset()),
node->structureSet()[0]));
} else {
+ GPRTemporary structure(this);
+
+ m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+
JITCompiler::JumpList done;
for (size_t i = 0; i < node->structureSet().size() - 1; ++i)
- done.append(m_jit.branchWeakStructure(JITCompiler::Equal, MacroAssembler::Address(base.gpr(), JSCell::structureIDOffset()), node->structureSet()[i]));
+ done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node->structureSet()[i]));
speculationCheck(
exitKind, JSValueSource::unboxedCell(base.gpr()), 0,
- m_jit.branchWeakStructure(
- JITCompiler::NotEqual, MacroAssembler::Address(base.gpr(), JSCell::structureIDOffset()), node->structureSet().last()));
+ m_jit.branchWeakPtr(
+ JITCompiler::NotEqual, structure.gpr(), node->structureSet().last()));
done.link(&m_jit);
}
@@ -3921,19 +4130,45 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case PutStructure: {
- Structure* oldStructure = node->transition()->previous;
- Structure* newStructure = node->transition()->next;
+ case StructureTransitionWatchpoint: {
+ // There is a fascinating question here of what to do about array profiling.
+ // We *could* try to tell the OSR exit about where the base of the access is.
+ // The DFG will have kept it alive, though it may not be in a register, and
+ // we shouldn't really load it since that could be a waste. For now though,
+ // we'll just rely on the fact that when a watchpoint fires then that's
+ // quite a hint already.
+
+ m_jit.addWeakReference(node->structure());
+#if !ASSERT_DISABLED
+ SpeculateCellOperand op1(this, node->child1());
+ JITCompiler::Jump isOK = m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(op1.gpr(), JSCell::structureOffset()), TrustedImmPtr(node->structure()));
+ m_jit.breakpoint();
+ isOK.link(&m_jit);
+#else
+ speculateCell(node->child1());
+#endif
+
+ noResult(node);
+ break;
+ }
+
+ case PhantomPutStructure: {
+ ASSERT(isKnownCell(node->child1().node()));
+ m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node);
+ noResult(node);
+ break;
+ }
+
+ case PutStructure: {
m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node);
SpeculateCellOperand base(this, node->child1());
+ GPRTemporary scratch1(this);
+ GPRTemporary scratch2(this);
GPRReg baseGPR = base.gpr();
- ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
- ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
- ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
- m_jit.store32(MacroAssembler::TrustedImm32(newStructure->id()), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()));
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(node->structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
noResult(node);
break;
@@ -3975,15 +4210,14 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case GetByOffset:
- case GetGetterSetterByOffset: {
+ case GetByOffset: {
StorageOperand storage(this, node->child1());
GPRTemporary result(this, Reuse, storage);
GPRReg storageGPR = storage.gpr();
GPRReg resultGPR = result.gpr();
- StorageAccessData& storageAccessData = node->storageAccessData();
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()];
m_jit.load64(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset)), resultGPR);
@@ -3991,32 +4225,6 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case GetGetter: {
- SpeculateCellOperand op1(this, node->child1());
- GPRTemporary result(this, Reuse, op1);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg resultGPR = result.gpr();
-
- m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfGetter()), resultGPR);
-
- cellResult(resultGPR, node);
- break;
- }
-
- case GetSetter: {
- SpeculateCellOperand op1(this, node->child1());
- GPRTemporary result(this, Reuse, op1);
-
- GPRReg op1GPR = op1.gpr();
- GPRReg resultGPR = result.gpr();
-
- m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfSetter()), resultGPR);
-
- cellResult(resultGPR, node);
- break;
- }
-
case PutByOffset: {
StorageOperand storage(this, node->child1());
JSValueOperand value(this, node->child3());
@@ -4028,29 +4236,13 @@ void SpeculativeJIT::compile(Node* node)
speculate(node, node->child2());
- StorageAccessData& storageAccessData = node->storageAccessData();
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()];
m_jit.store64(valueGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset)));
noResult(node);
break;
}
-
- case PutByIdFlush: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
- GPRTemporary scratch(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg valueGPR = value.gpr();
- GPRReg scratchGPR = scratch.gpr();
- flushRegisters();
-
- cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill);
-
- noResult(node);
- break;
- }
case PutById: {
SpeculateCellOperand base(this, node->child1());
@@ -4061,7 +4253,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg valueGPR = value.gpr();
GPRReg scratchGPR = scratch.gpr();
- cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect);
+ cachedPutById(node->codeOrigin, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect);
noResult(node);
break;
@@ -4076,7 +4268,7 @@ void SpeculativeJIT::compile(Node* node)
GPRReg valueGPR = value.gpr();
GPRReg scratchGPR = scratch.gpr();
- cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), Direct);
+ cachedPutById(node->codeOrigin, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), Direct);
noResult(node);
break;
@@ -4085,27 +4277,68 @@ void SpeculativeJIT::compile(Node* node)
case GetGlobalVar: {
GPRTemporary result(this);
- m_jit.load64(node->variablePointer(), result.gpr());
+ m_jit.load64(node->registerPointer(), result.gpr());
jsValueResult(result.gpr(), node);
break;
}
case PutGlobalVar: {
- JSValueOperand value(this, node->child2());
+ JSValueOperand value(this, node->child1());
- m_jit.store64(value.gpr(), node->variablePointer());
+ m_jit.store64(value.gpr(), node->registerPointer());
noResult(node);
break;
}
case NotifyWrite: {
- compileNotifyWrite(node);
+ VariableWatchpointSet* set = node->variableWatchpointSet();
+
+ JSValueOperand value(this, node->child1());
+ GPRReg valueGPR = value.gpr();
+
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+
+ m_jit.load8(set->addressOfState(), tempGPR);
+
+ JITCompiler::JumpList ready;
+
+ ready.append(m_jit.branch32(JITCompiler::Equal, tempGPR, TrustedImm32(IsInvalidated)));
+
+ if (set->state() == ClearWatchpoint) {
+ JITCompiler::Jump isWatched =
+ m_jit.branch32(JITCompiler::NotEqual, tempGPR, TrustedImm32(ClearWatchpoint));
+
+ m_jit.store64(valueGPR, set->addressOfInferredValue());
+ m_jit.store8(TrustedImm32(IsWatched), set->addressOfState());
+ ready.append(m_jit.jump());
+
+ isWatched.link(&m_jit);
+ }
+
+ ready.append(m_jit.branch64(
+ JITCompiler::Equal,
+ JITCompiler::AbsoluteAddress(set->addressOfInferredValue()), valueGPR));
+
+ JITCompiler::Jump slowCase = m_jit.branchTest8(
+ JITCompiler::NonZero, JITCompiler::AbsoluteAddress(set->addressOfSetIsNotEmpty()));
+ m_jit.store8(TrustedImm32(IsInvalidated), set->addressOfState());
+ m_jit.move(TrustedImm64(JSValue::encode(JSValue())), tempGPR);
+ m_jit.store64(tempGPR, set->addressOfInferredValue());
+
+ ready.link(&m_jit);
+
+ addSlowPathGenerator(
+ slowPathCall(slowCase, this, operationInvalidate, NoResult, set));
+
+ noResult(node);
break;
}
- case VarInjectionWatchpoint: {
+ case VarInjectionWatchpoint:
+ case VariableWatchpoint: {
noResult(node);
break;
}
@@ -4115,10 +4348,8 @@ void SpeculativeJIT::compile(Node* node)
GPRTemporary structure(this);
// Speculate that base 'ImplementsDefaultHasInstance'.
- speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(
- MacroAssembler::Zero,
- MacroAssembler::Address(base.gpr(), JSCell::typeInfoFlagsOffset()),
- MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
+ m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
noResult(node);
break;
@@ -4134,9 +4365,8 @@ void SpeculativeJIT::compile(Node* node)
GPRTemporary result(this);
GPRTemporary localGlobalObject(this);
GPRTemporary remoteGlobalObject(this);
- GPRTemporary scratch(this);
- JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
+ JITCompiler::Jump isCell = m_jit.branchTest64(JITCompiler::Zero, value.gpr(), GPRInfo::tagMaskRegister);
m_jit.compare64(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr());
JITCompiler::Jump done = m_jit.jump();
@@ -4147,18 +4377,15 @@ void SpeculativeJIT::compile(Node* node)
m_jit.move(TrustedImm32(0), result.gpr());
notMasqueradesAsUndefined = m_jit.jump();
} else {
- JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
- JITCompiler::NonZero,
- JITCompiler::Address(value.gpr(), JSCell::typeInfoFlagsOffset()),
- TrustedImm32(MasqueradesAsUndefined));
+ m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr());
+ JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
m_jit.move(TrustedImm32(0), result.gpr());
notMasqueradesAsUndefined = m_jit.jump();
isMasqueradesAsUndefined.link(&m_jit);
GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
- m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR);
- m_jit.emitLoadStructure(value.gpr(), result.gpr(), scratch.gpr());
+ m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->codeOrigin)), localGlobalObjectGPR);
m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR);
m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr());
}
@@ -4196,12 +4423,10 @@ void SpeculativeJIT::compile(Node* node)
JSValueOperand value(this, node->child1());
GPRTemporary result(this, Reuse, value);
- JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
+ JITCompiler::Jump isNotCell = m_jit.branchTest64(JITCompiler::NonZero, value.gpr(), GPRInfo::tagMaskRegister);
- m_jit.compare8(JITCompiler::Equal,
- JITCompiler::Address(value.gpr(), JSCell::typeInfoTypeOffset()),
- TrustedImm32(StringType),
- result.gpr());
+ m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr());
+ m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr());
m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
JITCompiler::Jump done = m_jit.jump();
@@ -4212,40 +4437,87 @@ void SpeculativeJIT::compile(Node* node)
jsValueResult(result.gpr(), node, DataFormatJSBoolean);
break;
}
-
+
case IsObject: {
JSValueOperand value(this, node->child1());
- GPRTemporary result(this, Reuse, value);
-
- JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
-
- m_jit.compare8(JITCompiler::AboveOrEqual,
- JITCompiler::Address(value.gpr(), JSCell::typeInfoTypeOffset()),
- TrustedImm32(ObjectType),
- result.gpr());
- m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
- JITCompiler::Jump done = m_jit.jump();
-
- isNotCell.link(&m_jit);
- m_jit.move(TrustedImm32(ValueFalse), result.gpr());
-
- done.link(&m_jit);
+ GPRReg valueGPR = value.gpr();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(operationIsObject, resultGPR, valueGPR);
+ m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
jsValueResult(result.gpr(), node, DataFormatJSBoolean);
break;
}
- case IsObjectOrNull: {
- compileIsObjectOrNull(node);
- break;
- }
-
case IsFunction: {
- compileIsFunction(node);
+ JSValueOperand value(this, node->child1());
+ GPRReg valueGPR = value.gpr();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(operationIsFunction, resultGPR, valueGPR);
+ m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
+ jsValueResult(result.gpr(), node, DataFormatJSBoolean);
break;
}
case TypeOf: {
- compileTypeOf(node);
+ JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
+ GPRReg valueGPR = value.gpr();
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ JITCompiler::JumpList doneJumps;
+
+ flushRegisters();
+
+ ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == CellUse || node->child1().useKind() == StringUse);
+
+ JITCompiler::Jump isNotCell = m_jit.branchTest64(JITCompiler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ if (node->child1().useKind() != UntypedUse)
+ DFG_TYPE_CHECK(JSValueSource(valueGPR), node->child1(), SpecCell, isNotCell);
+
+ if (!node->child1()->shouldSpeculateObject() || node->child1().useKind() == StringUse) {
+ m_jit.loadPtr(JITCompiler::Address(valueGPR, JSCell::structureOffset()), tempGPR);
+ JITCompiler::Jump notString = m_jit.branch8(JITCompiler::NotEqual, JITCompiler::Address(tempGPR, Structure::typeInfoTypeOffset()), TrustedImm32(StringType));
+ if (node->child1().useKind() == StringUse)
+ DFG_TYPE_CHECK(JSValueSource(valueGPR), node->child1(), SpecString, notString);
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.stringString()), resultGPR);
+ doneJumps.append(m_jit.jump());
+ if (node->child1().useKind() != StringUse) {
+ notString.link(&m_jit);
+ callOperation(operationTypeOf, resultGPR, valueGPR);
+ doneJumps.append(m_jit.jump());
+ }
+ } else {
+ callOperation(operationTypeOf, resultGPR, valueGPR);
+ doneJumps.append(m_jit.jump());
+ }
+
+ if (node->child1().useKind() == UntypedUse) {
+ isNotCell.link(&m_jit);
+ JITCompiler::Jump notNumber = m_jit.branchTest64(JITCompiler::Zero, valueGPR, GPRInfo::tagTypeNumberRegister);
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.numberString()), resultGPR);
+ doneJumps.append(m_jit.jump());
+ notNumber.link(&m_jit);
+
+ JITCompiler::Jump notUndefined = m_jit.branch64(JITCompiler::NotEqual, valueGPR, JITCompiler::TrustedImm64(ValueUndefined));
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.undefinedString()), resultGPR);
+ doneJumps.append(m_jit.jump());
+ notUndefined.link(&m_jit);
+
+ JITCompiler::Jump notNull = m_jit.branch64(JITCompiler::NotEqual, valueGPR, JITCompiler::TrustedImm64(ValueNull));
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.objectString()), resultGPR);
+ doneJumps.append(m_jit.jump());
+ notNull.link(&m_jit);
+
+ // Only boolean left
+ m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.booleanString()), resultGPR);
+ }
+ doneJumps.link(&m_jit);
+ cellResult(resultGPR, node);
break;
}
@@ -4254,94 +4526,368 @@ void SpeculativeJIT::compile(Node* node)
case Call:
case Construct:
- case CallVarargs:
- case CallForwardVarargs:
- case ConstructVarargs:
- case ConstructForwardVarargs:
emitCall(node);
break;
+
+ case CreateActivation: {
+ RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame);
- case LoadVarargs: {
- LoadVarargsData* data = node->loadVarargsData();
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value);
- GPRReg argumentsGPR;
- {
- JSValueOperand arguments(this, node->child1());
- argumentsGPR = arguments.gpr();
- flushRegisters();
- }
+ GPRReg valueGPR = value.gpr();
+ GPRReg resultGPR = result.gpr();
- callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsGPR, data->offset);
+ m_jit.move(valueGPR, resultGPR);
- lock(GPRInfo::returnValueGPR);
- {
- JSValueOperand arguments(this, node->child1());
- argumentsGPR = arguments.gpr();
- flushRegisters();
- }
- unlock(GPRInfo::returnValueGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
- // FIXME: There is a chance that we will call an effectful length property twice. This is safe
- // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
- // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
- // past the sizing.
- // https://bugs.webkit.org/show_bug.cgi?id=141448
-
- GPRReg argCountIncludingThisGPR =
- JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsGPR);
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationCreateActivation, resultGPR,
+ framePointerOffsetToGetActivationRegisters()));
- m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR);
- speculationCheck(
- VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
- MacroAssembler::Above,
- argCountIncludingThisGPR,
- TrustedImm32(data->limit)));
+ cellResult(resultGPR, node);
+ break;
+ }
- m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount));
+ case FunctionReentryWatchpoint: {
+ noResult(node);
+ break;
+ }
- callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsGPR, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum);
+ case CreateArguments: {
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value);
+
+ GPRReg valueGPR = value.gpr();
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(valueGPR, resultGPR);
+
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
+
+ if (node->codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationCreateInlinedArguments, resultGPR,
+ node->codeOrigin.inlineCallFrame));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(notCreated, this, operationCreateArguments, resultGPR));
+ }
+
+ cellResult(resultGPR, node);
+ break;
+ }
+
+ case TearOffActivation: {
+ RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame);
+
+ JSValueOperand activationValue(this, node->child1());
+ GPRTemporary scratch(this);
+ GPRReg activationValueGPR = activationValue.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, activationValueGPR);
+
+ SymbolTable* symbolTable = m_jit.symbolTableFor(node->codeOrigin);
+ int registersOffset = JSActivation::registersOffset(symbolTable);
+
+ int bytecodeCaptureStart = symbolTable->captureStart();
+ int machineCaptureStart = m_jit.graph().m_machineCaptureStart;
+ for (int i = symbolTable->captureCount(); i--;) {
+ m_jit.load64(
+ JITCompiler::Address(
+ GPRInfo::callFrameRegister,
+ (machineCaptureStart - i) * sizeof(Register)),
+ scratchGPR);
+ m_jit.store64(
+ scratchGPR,
+ JITCompiler::Address(
+ activationValueGPR,
+ registersOffset + (bytecodeCaptureStart - i) * sizeof(Register)));
+ }
+ m_jit.addPtr(TrustedImm32(registersOffset), activationValueGPR, scratchGPR);
+ m_jit.storePtr(scratchGPR, JITCompiler::Address(activationValueGPR, JSActivation::offsetOfRegisters()));
+
+ notCreated.link(&m_jit);
noResult(node);
break;
}
+
+ case TearOffArguments: {
+ JSValueOperand unmodifiedArgumentsValue(this, node->child1());
+ JSValueOperand activationValue(this, node->child2());
+ GPRReg unmodifiedArgumentsValueGPR = unmodifiedArgumentsValue.gpr();
+ GPRReg activationValueGPR = activationValue.gpr();
+
+ JITCompiler::Jump created = m_jit.branchTest64(JITCompiler::NonZero, unmodifiedArgumentsValueGPR);
+
+ if (node->codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffInlinedArguments, NoResult,
+ unmodifiedArgumentsValueGPR, activationValueGPR, node->codeOrigin.inlineCallFrame));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffArguments, NoResult, unmodifiedArgumentsValueGPR, activationValueGPR));
+ }
- case ForwardVarargs: {
- compileForwardVarargs(node);
+ noResult(node);
break;
}
- case CreateActivation: {
- compileCreateActivation(node);
+ case GetMyArgumentsLength: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ if (!isEmptySpeculation(
+ m_state.variables().operand(
+ m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) {
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), 0,
+ m_jit.branchTest64(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin))));
+ }
+
+ RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame);
+ m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
+ m_jit.sub32(TrustedImm32(1), resultGPR);
+ int32Result(resultGPR, node);
break;
}
- case CreateDirectArguments: {
- compileCreateDirectArguments(node);
+ case GetMyArgumentsLengthSafe: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::Jump created = m_jit.branchTest64(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)));
+
+ if (node->codeOrigin.inlineCallFrame) {
+ m_jit.move(
+ Imm64(JSValue::encode(jsNumber(node->codeOrigin.inlineCallFrame->arguments.size() - 1))),
+ resultGPR);
+ } else {
+ m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
+ m_jit.sub32(TrustedImm32(1), resultGPR);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, resultGPR);
+ }
+
+ // FIXME: the slow path generator should perform a forward speculation that the
+ // result is an integer. For now we postpone the speculation by having this return
+ // a JSValue.
+
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationGetArgumentsLength, resultGPR,
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset()));
+
+ jsValueResult(resultGPR, node);
break;
}
- case GetFromArguments: {
- compileGetFromArguments(node);
+ case GetMyArgumentByVal: {
+ SpeculateStrictInt32Operand index(this, node->child1());
+ GPRTemporary result(this);
+ GPRReg indexGPR = index.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!isEmptySpeculation(
+ m_state.variables().operand(
+ m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) {
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), 0,
+ m_jit.branchTest64(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin))));
+ }
+
+ m_jit.add32(TrustedImm32(1), indexGPR, resultGPR);
+ if (node->codeOrigin.inlineCallFrame) {
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultGPR,
+ Imm32(node->codeOrigin.inlineCallFrame->arguments.size())));
+ } else {
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultGPR,
+ JITCompiler::payloadFor(JSStack::ArgumentCount)));
+ }
+
+ JITCompiler::JumpList slowArgument;
+ JITCompiler::JumpList slowArgumentOutOfBounds;
+ if (m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) {
+ RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame);
+ const SlowArgument* slowArguments = m_jit.graph().m_slowArguments.get();
+
+ slowArgumentOutOfBounds.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual, indexGPR,
+ Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount())));
+
+ COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes);
+ m_jit.move(ImmPtr(slowArguments), resultGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ resultGPR, indexGPR, JITCompiler::TimesEight,
+ OBJECT_OFFSETOF(SlowArgument, index)),
+ resultGPR);
+ m_jit.signExtend32ToPtr(resultGPR, resultGPR);
+ m_jit.load64(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight),
+ resultGPR);
+ slowArgument.append(m_jit.jump());
+ }
+ slowArgumentOutOfBounds.link(&m_jit);
+
+ m_jit.signExtend32ToPtr(resultGPR, resultGPR);
+
+ m_jit.load64(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin)),
+ resultGPR);
+
+ slowArgument.link(&m_jit);
+ jsValueResult(resultGPR, node);
break;
}
- case PutToArguments: {
- compilePutToArguments(node);
+ case GetMyArgumentByValSafe: {
+ SpeculateStrictInt32Operand index(this, node->child1());
+ GPRTemporary result(this);
+ GPRReg indexGPR = index.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::JumpList slowPath;
+ slowPath.append(
+ m_jit.branchTest64(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin))));
+
+ m_jit.add32(TrustedImm32(1), indexGPR, resultGPR);
+ if (node->codeOrigin.inlineCallFrame) {
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultGPR,
+ Imm32(node->codeOrigin.inlineCallFrame->arguments.size())));
+ } else {
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultGPR,
+ JITCompiler::payloadFor(JSStack::ArgumentCount)));
+ }
+
+ JITCompiler::JumpList slowArgument;
+ JITCompiler::JumpList slowArgumentOutOfBounds;
+ if (m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) {
+ RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame);
+ const SlowArgument* slowArguments = m_jit.graph().m_slowArguments.get();
+
+ slowArgumentOutOfBounds.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual, indexGPR,
+ Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount())));
+
+ COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes);
+ m_jit.move(ImmPtr(slowArguments), resultGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ resultGPR, indexGPR, JITCompiler::TimesEight,
+ OBJECT_OFFSETOF(SlowArgument, index)),
+ resultGPR);
+ m_jit.signExtend32ToPtr(resultGPR, resultGPR);
+ m_jit.load64(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight),
+ resultGPR);
+ slowArgument.append(m_jit.jump());
+ }
+ slowArgumentOutOfBounds.link(&m_jit);
+
+ m_jit.signExtend32ToPtr(resultGPR, resultGPR);
+
+ m_jit.load64(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin)),
+ resultGPR);
+
+ if (node->codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationGetInlinedArgumentByVal, resultGPR,
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset(),
+ node->codeOrigin.inlineCallFrame,
+ indexGPR));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationGetArgumentByVal, resultGPR,
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset(),
+ indexGPR));
+ }
+
+ slowArgument.link(&m_jit);
+ jsValueResult(resultGPR, node);
break;
}
- case CreateScopedArguments: {
- compileCreateScopedArguments(node);
+ case CheckArgumentsNotCreated: {
+ ASSERT(!isEmptySpeculation(
+ m_state.variables().operand(
+ m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type));
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), 0,
+ m_jit.branchTest64(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin))));
+ noResult(node);
break;
}
- case CreateClonedArguments: {
- compileCreateClonedArguments(node);
+ case NewFunctionNoCheck:
+ compileNewFunctionNoCheck(node);
+ break;
+
+ case NewFunction: {
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value);
+
+ GPRReg valueGPR = value.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.move(valueGPR, resultGPR);
+
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
+
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationNewFunction,
+ resultGPR, m_jit.codeBlock()->functionDecl(node->functionDeclIndex())));
+
+ jsValueResult(resultGPR, node);
break;
}
- case NewFunction:
- compileNewFunction(node);
+ case NewFunctionExpression:
+ compileNewFunctionExpression(node);
break;
case In:
@@ -4362,16 +4908,14 @@ void SpeculativeJIT::compile(Node* node)
break;
case CheckWatchdogTimer:
- ASSERT(m_jit.vm()->watchdog);
speculationCheck(
WatchdogTimerFired, JSValueRegs(), 0,
m_jit.branchTest8(
JITCompiler::NonZero,
- JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog->timerDidFireAddress())));
+ JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog.timerDidFireAddress())));
break;
case Phantom:
- case Check:
DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
noResult(node);
break;
@@ -4386,321 +4930,16 @@ void SpeculativeJIT::compile(Node* node)
break;
case Unreachable:
- DFG_CRASH(m_jit.graph(), node, "Unexpected Unreachable node");
+ RELEASE_ASSERT_NOT_REACHED();
break;
- case StoreBarrier: {
+ case StoreBarrier:
+ case ConditionalStoreBarrier:
+ case StoreBarrierWithNullCheck: {
compileStoreBarrier(node);
break;
}
- case GetEnumerableLength: {
- SpeculateCellOperand enumerator(this, node->child1());
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- m_jit.load32(MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR);
- int32Result(resultGPR, node);
- break;
- }
- case HasGenericProperty: {
- JSValueOperand base(this, node->child1());
- SpeculateCellOperand property(this, node->child2());
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
- callOperation(operationHasGenericProperty, resultGPR, base.gpr(), property.gpr());
- jsValueResult(resultGPR, node, DataFormatJSBoolean);
- break;
- }
- case HasStructureProperty: {
- JSValueOperand base(this, node->child1());
- SpeculateCellOperand property(this, node->child2());
- SpeculateCellOperand enumerator(this, node->child3());
- GPRTemporary result(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg propertyGPR = property.gpr();
- GPRReg resultGPR = result.gpr();
-
- m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), resultGPR);
- MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual,
- resultGPR,
- MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset()));
-
- moveTrueTo(resultGPR);
- MacroAssembler::Jump done = m_jit.jump();
-
- done.link(&m_jit);
-
- addSlowPathGenerator(slowPathCall(wrongStructure, this, operationHasGenericProperty, resultGPR, baseGPR, propertyGPR));
- jsValueResult(resultGPR, node, DataFormatJSBoolean);
- break;
- }
- case HasIndexedProperty: {
- SpeculateCellOperand base(this, node->child1());
- SpeculateStrictInt32Operand index(this, node->child2());
- GPRTemporary result(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg indexGPR = index.gpr();
- GPRReg resultGPR = result.gpr();
-
- MacroAssembler::JumpList slowCases;
- ArrayMode mode = node->arrayMode();
- switch (mode.type()) {
- case Array::Int32:
- case Array::Contiguous: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- GPRTemporary scratch(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
- if (mode.isInBounds())
- speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
- else
- slowCases.append(outOfBounds);
-
- m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchGPR);
- slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR));
- moveTrueTo(resultGPR);
- break;
- }
- case Array::Double: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- FPRTemporary scratch(this);
- FPRReg scratchFPR = scratch.fpr();
- GPRReg storageGPR = storage.gpr();
-
- MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
- if (mode.isInBounds())
- speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
- else
- slowCases.append(outOfBounds);
-
- m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR);
- slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR));
- moveTrueTo(resultGPR);
- break;
- }
- case Array::ArrayStorage: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- GPRTemporary scratch(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
- if (mode.isInBounds())
- speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
- else
- slowCases.append(outOfBounds);
-
- m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), scratchGPR);
- slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR));
- moveTrueTo(resultGPR);
- break;
- }
- default: {
- slowCases.append(m_jit.jump());
- break;
- }
- }
-
- addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedProperty, resultGPR, baseGPR, indexGPR));
-
- jsValueResult(resultGPR, node, DataFormatJSBoolean);
- break;
- }
- case GetDirectPname: {
- Edge& baseEdge = m_jit.graph().varArgChild(node, 0);
- Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
- Edge& indexEdge = m_jit.graph().varArgChild(node, 2);
- Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
-
- SpeculateCellOperand base(this, baseEdge);
- SpeculateCellOperand property(this, propertyEdge);
- SpeculateStrictInt32Operand index(this, indexEdge);
- SpeculateCellOperand enumerator(this, enumeratorEdge);
- GPRTemporary result(this);
- GPRTemporary scratch1(this);
- GPRTemporary scratch2(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg propertyGPR = property.gpr();
- GPRReg indexGPR = index.gpr();
- GPRReg enumeratorGPR = enumerator.gpr();
- GPRReg resultGPR = result.gpr();
- GPRReg scratch1GPR = scratch1.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
-
- // Check the structure
- m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratch1GPR);
- MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual,
- scratch1GPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset()));
-
- // Compute the offset
- // If index is less than the enumerator's cached inline storage, then it's an inline access
- MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual,
- indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
-
- m_jit.load64(MacroAssembler::BaseIndex(baseGPR, indexGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage()), resultGPR);
-
- MacroAssembler::Jump done = m_jit.jump();
-
- // Otherwise it's out of line
- outOfLineAccess.link(&m_jit);
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratch2GPR);
- m_jit.move(indexGPR, scratch1GPR);
- m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratch1GPR);
- m_jit.neg32(scratch1GPR);
- m_jit.signExtend32ToPtr(scratch1GPR, scratch1GPR);
- int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
- m_jit.load64(MacroAssembler::BaseIndex(scratch2GPR, scratch1GPR, MacroAssembler::TimesEight, offsetOfFirstProperty), resultGPR);
-
- done.link(&m_jit);
-
- addSlowPathGenerator(slowPathCall(wrongStructure, this, operationGetByVal, resultGPR, baseGPR, propertyGPR));
-
- jsValueResult(resultGPR, node);
- break;
- }
- case GetPropertyEnumerator: {
- SpeculateCellOperand base(this, node->child1());
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
- callOperation(operationGetPropertyEnumerator, resultGPR, base.gpr());
- cellResult(resultGPR, node);
- break;
- }
- case GetEnumeratorStructurePname:
- case GetEnumeratorGenericPname: {
- SpeculateCellOperand enumerator(this, node->child1());
- SpeculateStrictInt32Operand index(this, node->child2());
- GPRTemporary scratch1(this);
- GPRTemporary result(this);
-
- GPRReg enumeratorGPR = enumerator.gpr();
- GPRReg indexGPR = index.gpr();
- GPRReg scratch1GPR = scratch1.gpr();
- GPRReg resultGPR = result.gpr();
-
- MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, indexGPR,
- MacroAssembler::Address(enumeratorGPR, (op == GetEnumeratorStructurePname)
- ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset()
- : JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
-
- m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsNull())), resultGPR);
-
- MacroAssembler::Jump done = m_jit.jump();
- inBounds.link(&m_jit);
-
- m_jit.loadPtr(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratch1GPR);
- m_jit.load64(MacroAssembler::BaseIndex(scratch1GPR, indexGPR, MacroAssembler::TimesEight), resultGPR);
-
- done.link(&m_jit);
- jsValueResult(resultGPR, node);
- break;
- }
- case ToIndexString: {
- SpeculateInt32Operand index(this, node->child1());
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
- callOperation(operationToIndexString, resultGPR, index.gpr());
- cellResult(resultGPR, node);
- break;
- }
- case ProfileType: {
- JSValueOperand value(this, node->child1());
- GPRTemporary scratch1(this);
- GPRTemporary scratch2(this);
- GPRTemporary scratch3(this);
-
- GPRReg scratch1GPR = scratch1.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
- GPRReg scratch3GPR = scratch3.gpr();
- GPRReg valueGPR = value.gpr();
-
- MacroAssembler::JumpList jumpToEnd;
-
- jumpToEnd.append(m_jit.branchTest64(JITCompiler::Zero, valueGPR));
-
- TypeLocation* cachedTypeLocation = node->typeLocation();
- // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
- // These typechecks are inlined to match those of the 64-bit JSValue type checks.
- if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
- jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined()))));
- else if (cachedTypeLocation->m_lastSeenType == TypeNull)
- jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNull()))));
- else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
- m_jit.move(valueGPR, scratch2GPR);
- m_jit.and64(TrustedImm32(~1), scratch2GPR);
- jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, scratch2GPR, MacroAssembler::TrustedImm64(ValueFalse)));
- } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
- jumpToEnd.append(m_jit.branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister));
- else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
- jumpToEnd.append(m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagTypeNumberRegister));
- else if (cachedTypeLocation->m_lastSeenType == TypeString) {
- MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
- jumpToEnd.append(m_jit.branchIfString(valueGPR));
- isNotCell.link(&m_jit);
- }
-
- // Load the TypeProfilerLog into Scratch2.
- TypeProfilerLog* cachedTypeProfilerLog = m_jit.vm()->typeProfilerLog();
- m_jit.move(TrustedImmPtr(cachedTypeProfilerLog), scratch2GPR);
-
- // Load the next LogEntry into Scratch1.
- m_jit.loadPtr(MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR);
-
- // Store the JSValue onto the log entry.
- m_jit.store64(valueGPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset()));
-
- // Store the structureID of the cell if valueGPR is a cell, otherwise, store 0 on the log entry.
- MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
- m_jit.load32(MacroAssembler::Address(valueGPR, JSCell::structureIDOffset()), scratch3GPR);
- m_jit.store32(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
- MacroAssembler::Jump skipIsCell = m_jit.jump();
- isNotCell.link(&m_jit);
- m_jit.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
- skipIsCell.link(&m_jit);
-
- // Store the typeLocation on the log entry.
- m_jit.move(TrustedImmPtr(cachedTypeLocation), scratch3GPR);
- m_jit.storePtr(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::locationOffset()));
-
- // Increment the current log entry.
- m_jit.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), scratch1GPR);
- m_jit.storePtr(scratch1GPR, MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()));
- MacroAssembler::Jump clearLog = m_jit.branchPtr(MacroAssembler::Equal, scratch1GPR, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
- addSlowPathGenerator(
- slowPathCall(clearLog, this, operationProcessTypeProfilerLogDFG, NoResult));
-
- jumpToEnd.link(&m_jit);
-
- noResult(node);
- break;
- }
- case ProfileControlFlow: {
- BasicBlockLocation* basicBlockLocation = node->basicBlockLocation();
- if (!basicBlockLocation->hasExecuted()) {
- GPRTemporary scratch1(this);
- basicBlockLocation->emitExecuteCode(m_jit, scratch1.gpr());
- }
- noResult(node);
- break;
- }
-
#if ENABLE(FTL_JIT)
case CheckTierUpInLoop: {
MacroAssembler::Jump done = m_jit.branchAdd32(
@@ -4710,7 +4949,7 @@ void SpeculativeJIT::compile(Node* node)
silentSpillAllRegisters(InvalidGPRReg);
m_jit.setupArgumentsExecState();
- appendCall(triggerTierUpNowInLoop);
+ appendCall(triggerTierUpNow);
silentFillAllRegisters(InvalidGPRReg);
done.link(&m_jit);
@@ -4732,27 +4971,20 @@ void SpeculativeJIT::compile(Node* node)
break;
}
- case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter: {
- ASSERT(!node->origin.semantic.inlineCallFrame);
+ case CheckTierUpAndOSREnter: {
+ ASSERT(!node->codeOrigin.inlineCallFrame);
GPRTemporary temp(this);
GPRReg tempGPR = temp.gpr();
-
- MacroAssembler::Jump forceOSREntry;
- if (op == CheckTierUpWithNestedTriggerAndOSREnter)
- forceOSREntry = m_jit.branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->nestedTriggerIsSet));
MacroAssembler::Jump done = m_jit.branchAdd32(
MacroAssembler::Signed,
TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->tierUpCounter.m_counter));
-
- if (forceOSREntry.isSet())
- forceOSREntry.link(&m_jit);
+
silentSpillAllRegisters(tempGPR);
m_jit.setupArgumentsWithExecState(
- TrustedImm32(node->origin.semantic.bytecodeIndex),
+ TrustedImm32(node->codeOrigin.bytecodeIndex),
TrustedImm32(m_stream->size()));
appendCallSetResult(triggerOSREntryNow, tempGPR);
MacroAssembler::Jump dontEnter = m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR);
@@ -4767,34 +4999,18 @@ void SpeculativeJIT::compile(Node* node)
case CheckTierUpInLoop:
case CheckTierUpAtReturn:
case CheckTierUpAndOSREnter:
- case CheckTierUpWithNestedTriggerAndOSREnter:
- DFG_CRASH(m_jit.graph(), node, "Unexpected tier-up node");
+ RELEASE_ASSERT_NOT_REACHED();
break;
#endif // ENABLE(FTL_JIT)
-
+
case LastNodeType:
case Phi:
case Upsilon:
+ case GetArgument:
case ExtractOSREntryLocal:
case CheckInBounds:
case ArithIMul:
- case MultiGetByOffset:
- case MultiPutByOffset:
- case FiatInt52:
- case CheckBadCell:
- case BottomValue:
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case GetMyArgumentByVal:
- case PutHint:
- case CheckStructureImmediate:
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- case PutStack:
- case KillStack:
- case GetStack:
- DFG_CRASH(m_jit.graph(), node, "Unexpected node");
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
@@ -4810,86 +5026,30 @@ void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUs
{
JITCompiler::Jump isNotCell;
if (!isKnownCell(valueUse.node()))
- isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
-
- JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
+ isNotCell = m_jit.branchTest64(JITCompiler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+
+ JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, ownerGPR, scratch1, scratch2);
storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
- ownerIsRememberedOrInEden.link(&m_jit);
+ definitelyNotMarked.link(&m_jit);
if (!isKnownCell(valueUse.node()))
isNotCell.link(&m_jit);
}
-#endif // ENABLE(GGC)
-void SpeculativeJIT::moveTrueTo(GPRReg gpr)
+void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2)
{
- m_jit.move(TrustedImm32(ValueTrue), gpr);
-}
+ JITCompiler::Jump isNotCell;
+ if (!isKnownCell(valueUse.node()))
+ isNotCell = m_jit.branchTest64(JITCompiler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
-void SpeculativeJIT::moveFalseTo(GPRReg gpr)
-{
- m_jit.move(TrustedImm32(ValueFalse), gpr);
-}
+ JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, owner);
+ storeToWriteBarrierBuffer(owner, scratch1, scratch2);
+ definitelyNotMarked.link(&m_jit);
-void SpeculativeJIT::blessBoolean(GPRReg gpr)
-{
- m_jit.or32(TrustedImm32(ValueFalse), gpr);
-}
-
-void SpeculativeJIT::convertMachineInt(Edge valueEdge, GPRReg resultGPR)
-{
- JSValueOperand value(this, valueEdge, ManualOperandSpeculation);
- GPRReg valueGPR = value.gpr();
-
- JITCompiler::Jump notInt32 =
- m_jit.branch64(JITCompiler::Below, valueGPR, GPRInfo::tagTypeNumberRegister);
-
- m_jit.signExtend32ToPtr(valueGPR, resultGPR);
- JITCompiler::Jump done = m_jit.jump();
-
- notInt32.link(&m_jit);
- silentSpillAllRegisters(resultGPR);
- callOperation(operationConvertBoxedDoubleToInt52, resultGPR, valueGPR);
- silentFillAllRegisters(resultGPR);
-
- DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), valueEdge, SpecInt32 | SpecInt52AsDouble,
- m_jit.branch64(
- JITCompiler::Equal, resultGPR,
- JITCompiler::TrustedImm64(JSValue::notInt52)));
- done.link(&m_jit);
-}
-
-void SpeculativeJIT::speculateMachineInt(Edge edge)
-{
- if (!needsTypeCheck(edge, SpecInt32 | SpecInt52AsDouble))
- return;
-
- GPRTemporary temp(this);
- convertMachineInt(edge, temp.gpr());
-}
-
-void SpeculativeJIT::speculateDoubleRepMachineInt(Edge edge)
-{
- if (!needsTypeCheck(edge, SpecInt52AsDouble))
- return;
-
- SpeculateDoubleOperand value(this, edge);
- FPRReg valueFPR = value.fpr();
-
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
-
- callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR);
-
- DFG_TYPE_CHECK(
- JSValueRegs(), edge, SpecInt52AsDouble,
- m_jit.branch64(
- JITCompiler::Equal, resultGPR,
- JITCompiler::TrustedImm64(JSValue::notInt52)));
+ if (!isKnownCell(valueUse.node()))
+ isNotCell.link(&m_jit);
}
+#endif // ENABLE(GGC)
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp
index 0713720b5..cf1017624 100644
--- a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
#include "DFGGraph.h"
#include "DFGPhase.h"
#include "DFGValueSource.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -46,6 +46,8 @@ public:
bool run()
{
+ SymbolTable* symbolTable = codeBlock()->symbolTable();
+
// This enumerates the locals that we actually care about and packs them. So for example
// if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
// treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
@@ -54,7 +56,7 @@ public:
BitVector usedLocals;
// Collect those variables that are used from IR.
- bool hasNodesThatNeedFixup = false;
+ bool hasGetLocalUnlinked = false;
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
@@ -78,32 +80,7 @@ public:
if (operand.isArgument())
break;
usedLocals.set(operand.toLocal());
- hasNodesThatNeedFixup = true;
- break;
- }
-
- case LoadVarargs:
- case ForwardVarargs: {
- LoadVarargsData* data = node->loadVarargsData();
- if (data->count.isLocal())
- usedLocals.set(data->count.toLocal());
- if (data->start.isLocal()) {
- // This part really relies on the contiguity of stack layout
- // assignments.
- ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal());
- for (unsigned i = data->limit; i--;)
- usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal());
- } // the else case shouldn't happen.
- hasNodesThatNeedFixup = true;
- break;
- }
-
- case PutStack:
- case GetStack: {
- StackAccessData* stack = node->stackAccessData();
- if (stack->local.isArgument())
- break;
- usedLocals.set(stack->local.toLocal());
+ hasGetLocalUnlinked = true;
break;
}
@@ -113,13 +90,27 @@ public:
}
}
- for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
+ // Ensure that captured variables and captured inline arguments are pinned down.
+ // They should have been because of flushes, except that the flushes can be optimized
+ // away.
+ if (symbolTable) {
+ for (int i = symbolTable->captureStart(); i > symbolTable->captureEnd(); i--)
+ usedLocals.set(VirtualRegister(i).toLocal());
+ }
+ if (codeBlock()->usesArguments()) {
+ usedLocals.set(codeBlock()->argumentsRegister().toLocal());
+ usedLocals.set(unmodifiedArgumentsRegister(codeBlock()->argumentsRegister()).toLocal());
+ }
+ if (codeBlock()->uncheckedActivationRegister().isValid())
+ usedLocals.set(codeBlock()->activationRegister().toLocal());
+ for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) {
InlineCallFrame* inlineCallFrame = *iter;
+ if (!inlineCallFrame->executable->usesArguments())
+ continue;
- if (inlineCallFrame->isVarargs()) {
- usedLocals.set(VirtualRegister(
- JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal());
- }
+ VirtualRegister argumentsRegister = m_graph.argumentsRegisterFor(inlineCallFrame);
+ usedLocals.set(argumentsRegister.toLocal());
+ usedLocals.set(unmodifiedArgumentsRegister(argumentsRegister).toLocal());
for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
usedLocals.set(VirtualRegister(
@@ -156,37 +147,38 @@ public:
if (allocation[local] == UINT_MAX)
continue;
- variable->machineLocal() = assign(allocation, variable->local());
+ variable->machineLocal() = virtualRegisterForLocal(
+ allocation[variable->local().toLocal()]);
}
- for (StackAccessData* data : m_graph.m_stackAccessData) {
- if (!data->local.isLocal()) {
- data->machineLocal = data->local;
- continue;
- }
-
- if (static_cast<size_t>(data->local.toLocal()) >= allocation.size())
- continue;
- if (allocation[data->local.toLocal()] == UINT_MAX)
- continue;
-
- data->machineLocal = assign(allocation, data->local);
+ if (codeBlock()->usesArguments()) {
+ VirtualRegister argumentsRegister = virtualRegisterForLocal(
+ allocation[codeBlock()->argumentsRegister().toLocal()]);
+ RELEASE_ASSERT(
+ virtualRegisterForLocal(allocation[
+ unmodifiedArgumentsRegister(
+ codeBlock()->argumentsRegister()).toLocal()])
+ == unmodifiedArgumentsRegister(argumentsRegister));
+ codeBlock()->setArgumentsRegister(argumentsRegister);
+ }
+
+ if (codeBlock()->uncheckedActivationRegister().isValid()) {
+ codeBlock()->setActivationRegister(
+ virtualRegisterForLocal(allocation[codeBlock()->activationRegister().toLocal()]));
}
- // This register is never valid for DFG code blocks.
- codeBlock()->setActivationRegister(VirtualRegister());
- if (LIKELY(!m_graph.hasDebuggerEnabled()))
- codeBlock()->setScopeRegister(VirtualRegister());
- else
- codeBlock()->setScopeRegister(assign(allocation, codeBlock()->scopeRegister()));
-
for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
InlineVariableData data = m_graph.m_inlineVariableData[i];
InlineCallFrame* inlineCallFrame = data.inlineCallFrame;
- if (inlineCallFrame->isVarargs()) {
- inlineCallFrame->argumentCountRegister = assign(
- allocation, VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
+ if (inlineCallFrame->executable->usesArguments()) {
+ inlineCallFrame->argumentsRegister = virtualRegisterForLocal(
+ allocation[m_graph.argumentsRegisterFor(inlineCallFrame).toLocal()]);
+
+ RELEASE_ASSERT(
+ virtualRegisterForLocal(allocation[unmodifiedArgumentsRegister(
+ m_graph.argumentsRegisterFor(inlineCallFrame)).toLocal()])
+ == unmodifiedArgumentsRegister(inlineCallFrame->argumentsRegister));
}
for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
@@ -205,17 +197,42 @@ public:
RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
if (inlineCallFrame->isClosureCall) {
- VariableAccessData* variable = data.calleeVariable->find();
ValueSource source = ValueSource::forFlushFormat(
- variable->machineLocal(),
- variable->flushFormat());
+ data.calleeVariable->machineLocal(),
+ data.calleeVariable->flushFormat());
inlineCallFrame->calleeRecovery = source.valueRecovery();
} else
RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
}
+ if (symbolTable) {
+ if (symbolTable->captureCount()) {
+ unsigned captureStartLocal = allocation[
+ VirtualRegister(codeBlock()->symbolTable()->captureStart()).toLocal()];
+ ASSERT(captureStartLocal != UINT_MAX);
+ m_graph.m_machineCaptureStart = virtualRegisterForLocal(captureStartLocal).offset();
+ } else
+ m_graph.m_machineCaptureStart = virtualRegisterForLocal(0).offset();
+
+ // This is an abomination. If we had captured an argument then the argument ends
+ // up being "slow", meaning that loads of the argument go through an extra lookup
+ // table.
+ if (const SlowArgument* slowArguments = symbolTable->slowArguments()) {
+ auto newSlowArguments = std::make_unique<SlowArgument[]>(
+ symbolTable->parameterCount());
+ for (size_t i = symbolTable->parameterCount(); i--;) {
+ newSlowArguments[i] = slowArguments[i];
+ VirtualRegister reg = VirtualRegister(slowArguments[i].index);
+ if (reg.isLocal())
+ newSlowArguments[i].index = virtualRegisterForLocal(allocation[reg.toLocal()]).offset();
+ }
+
+ m_graph.m_slowArguments = std::move(newSlowArguments);
+ }
+ }
+
// Fix GetLocalUnlinked's variable references.
- if (hasNodesThatNeedFixup) {
+ if (hasGetLocalUnlinked) {
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
@@ -224,15 +241,10 @@ public:
Node* node = block->at(nodeIndex);
switch (node->op()) {
case GetLocalUnlinked: {
- node->setUnlinkedMachineLocal(assign(allocation, node->unlinkedLocal()));
- break;
- }
-
- case LoadVarargs:
- case ForwardVarargs: {
- LoadVarargsData* data = node->loadVarargsData();
- data->machineCount = assign(allocation, data->count);
- data->machineStart = assign(allocation, data->start);
+ VirtualRegister operand = node->unlinkedLocal();
+ if (operand.isLocal())
+ operand = virtualRegisterForLocal(allocation[operand.toLocal()]);
+ node->setUnlinkedMachineLocal(operand);
break;
}
@@ -245,20 +257,6 @@ public:
return true;
}
-
-private:
- VirtualRegister assign(const Vector<unsigned>& allocation, VirtualRegister src)
- {
- VirtualRegister result = src;
- if (result.isLocal()) {
- unsigned myAllocation = allocation[result.toLocal()];
- if (myAllocation == UINT_MAX)
- result = VirtualRegister();
- else
- result = virtualRegisterForLocal(myAllocation);
- }
- return result;
- }
};
bool performStackLayout(Graph& graph)
diff --git a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h
index ccb0cea88..b18ff9505 100644
--- a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGStackLayoutPhase_h
#define DFGStackLayoutPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.cpp b/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.cpp
deleted file mode 100644
index b5e1a364f..000000000
--- a/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGStaticExecutionCountEstimationPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBasicBlockInlines.h"
-#include "DFGGraph.h"
-#include "DFGPhase.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-class StaticExecutionCountEstimationPhase : public Phase {
-public:
- StaticExecutionCountEstimationPhase(Graph& graph)
- : Phase(graph, "static execution count estimation")
- {
- }
-
- bool run()
- {
- m_graph.m_naturalLoops.computeIfNecessary(m_graph);
-
- // Estimate basic block execution counts based on loop depth.
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
-
- block->executionCount = pow(10, m_graph.m_naturalLoops.loopDepth(block));
- }
-
- // Estimate branch weights based on execution counts. This isn't quite correct. It'll
- // assume that each block's conditional successor only has that block as its
- // predecessor.
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
-
- Node* terminal = block->terminal();
- switch (terminal->op()) {
- case Branch: {
- BranchData* data = terminal->branchData();
- applyCounts(data->taken);
- applyCounts(data->notTaken);
- break;
- }
-
- case Switch: {
- SwitchData* data = terminal->switchData();
- for (unsigned i = data->cases.size(); i--;)
- applyCounts(data->cases[i].target);
- applyCounts(data->fallThrough);
- break;
- }
-
- default:
- break;
- }
- }
-
- return true;
- }
-
-private:
- void applyCounts(BranchTarget& target)
- {
- target.count = target.block->executionCount;
- }
-};
-
-bool performStaticExecutionCountEstimation(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Static Execution Count Estimation");
- return runPhase<StaticExecutionCountEstimationPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-
diff --git a/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.h b/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.h
deleted file mode 100644
index e66f7ec2c..000000000
--- a/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGStaticExecutionCountEstimationPhase_h
-#define DFGStaticExecutionCountEstimationPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Estimate execution counts (branch execution counts, in particular) based on
-// presently available static information. This phase is important because
-// subsequent CFG transformations, such as OSR entrypoint creation, perturb our
-// ability to do accurate static estimations. Hence we lock in the estimates early.
-// Ideally, we would have dynamic information, but we don't right now, so this is as
-// good as it gets.
-//
-// It's worth noting that if we didn't have this phase, then the static estimation
-// would be perfomed by LLVM instead. It's worth trying to make this phase perform
-// the estimates using the same heuristics that LLVM would use.
-
-bool performStaticExecutionCountEstimation(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGStaticExecutionCountEstimationPhase_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.cpp b/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.cpp
new file mode 100644
index 000000000..d73c5201e
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGStoreBarrierElisionPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGBasicBlock.h"
+#include "DFGClobberSet.h"
+#include "DFGGraph.h"
+#include "DFGPhase.h"
+#include <wtf/HashSet.h>
+
+namespace JSC { namespace DFG {
+
+class StoreBarrierElisionPhase : public Phase {
+public:
+ StoreBarrierElisionPhase(Graph& graph)
+ : Phase(graph, "store barrier elision")
+ , m_currentBlock(0)
+ , m_currentIndex(0)
+ {
+ m_gcClobberSet.add(GCState);
+ }
+
+ bool run()
+ {
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ m_currentBlock = m_graph.block(blockIndex);
+ if (!m_currentBlock)
+ continue;
+ handleBlock(m_currentBlock);
+ }
+ return true;
+ }
+
+private:
+ bool couldCauseGC(Node* node)
+ {
+ return writesOverlap(m_graph, node, m_gcClobberSet);
+ }
+
+ bool allocatesFreshObject(Node* node)
+ {
+ switch (node->op()) {
+ case NewObject:
+ case NewArray:
+ case NewArrayWithSize:
+ case NewArrayBuffer:
+ case NewTypedArray:
+ case NewRegexp:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ void noticeFreshObject(HashSet<Node*>& dontNeedBarriers, Node* node)
+ {
+ ASSERT(allocatesFreshObject(node));
+ dontNeedBarriers.add(node);
+ }
+
+ Node* getBaseOfStore(Node* barrierNode)
+ {
+ ASSERT(barrierNode->isStoreBarrier());
+ return barrierNode->child1().node();
+ }
+
+ bool shouldBeElided(HashSet<Node*>& dontNeedBarriers, Node* node)
+ {
+ ASSERT(node->isStoreBarrier());
+ return dontNeedBarriers.contains(node->child1().node());
+ }
+
+ void elideBarrier(Node* node)
+ {
+ ASSERT(node->isStoreBarrier());
+ node->convertToPhantom();
+ }
+
+ void handleNode(HashSet<Node*>& dontNeedBarriers, Node* node)
+ {
+ if (couldCauseGC(node))
+ dontNeedBarriers.clear();
+
+ if (allocatesFreshObject(node))
+ noticeFreshObject(dontNeedBarriers, node);
+
+ if (!node->isStoreBarrier())
+ return;
+
+ if (shouldBeElided(dontNeedBarriers, node)) {
+ elideBarrier(node);
+ return;
+ }
+
+ Node* base = getBaseOfStore(node);
+ if (!base)
+ return;
+
+ if (dontNeedBarriers.contains(base))
+ return;
+ dontNeedBarriers.add(base);
+ }
+
+ bool handleBlock(BasicBlock* block)
+ {
+ HashSet<Node*> dontNeedBarriers;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ m_currentIndex = indexInBlock;
+ Node* node = block->at(indexInBlock);
+ handleNode(dontNeedBarriers, node);
+ }
+ return true;
+ }
+
+ ClobberSet m_gcClobberSet;
+ BasicBlock* m_currentBlock;
+ unsigned m_currentIndex;
+};
+
+bool performStoreBarrierElision(Graph& graph)
+{
+ SamplingRegion samplingRegion("DFG Store Barrier Elision Phase");
+ return runPhase<StoreBarrierElisionPhase>(graph);
+}
+
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.h b/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.h
index 92498a90b..94276bea1 100644
--- a/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.h
+++ b/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,35 +23,15 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGFunctionWhitelist_h
-#define DFGFunctionWhitelist_h
+#ifndef DFGStoreBarrierElisionPhase_h
+#define DFGStoreBarrierElisionPhase_h
-#if ENABLE(DFG_JIT)
+namespace JSC { namespace DFG {
-#include <wtf/HashSet.h>
-#include <wtf/text/WTFString.h>
+class Graph;
-namespace JSC {
-
-class CodeBlock;
-
-namespace DFG {
-
-class FunctionWhitelist {
-public:
- static FunctionWhitelist& ensureGlobalWhitelist();
- explicit FunctionWhitelist(const char*);
-
- bool contains(CodeBlock*) const;
-
-private:
- void parseFunctionNamesInFile(const char*);
-
- HashSet<String> m_entries;
-};
+bool performStoreBarrierElision(Graph&);
} } // namespace JSC::DFG
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGFunctionWhitelist_h
+#endif // DFGStoreBarrierElisionPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.cpp b/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.cpp
deleted file mode 100644
index 66acda29b..000000000
--- a/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.cpp
+++ /dev/null
@@ -1,528 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGStoreBarrierInsertionPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGAbstractInterpreterInlines.h"
-#include "DFGBlockMapInlines.h"
-#include "DFGDoesGC.h"
-#include "DFGGraph.h"
-#include "DFGInPlaceAbstractState.h"
-#include "DFGInsertionSet.h"
-#include "DFGPhase.h"
-#include "JSCInlines.h"
-#include <wtf/CommaPrinter.h>
-#include <wtf/HashSet.h>
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-bool verbose = false;
-
-enum class PhaseMode {
- // Does only a local analysis for store barrier insertion and assumes that pointers live
- // from predecessor blocks may need barriers. Assumes CPS conventions. Does not use AI for
- // eliminating store barriers, but does a best effort to eliminate barriers when you're
- // storing a non-cell value by using Node::result() and by looking at constants. The local
- // analysis is based on GC epochs, so it will eliminate a lot of locally redundant barriers.
- Fast,
-
- // Does a global analysis for store barrier insertion. Reuses the GC-epoch-based analysis
- // used by Fast, but adds a conservative merge rule for propagating information from one
- // block to the next. This will ensure for example that if a value V coming from multiple
- // predecessors in B didn't need any more barriers at the end of each predecessor (either
- // because it was the last allocated object in that predecessor or because it just had a
- // barrier executed), then until we hit another GC point in B, we won't need another barrier
- // on V. Uses AI for eliminating barriers when we know that the value being stored is not a
- // cell. Assumes SSA conventions.
- Global
-};
-
-template<PhaseMode mode>
-class StoreBarrierInsertionPhase : public Phase {
-public:
- StoreBarrierInsertionPhase(Graph& graph)
- : Phase(graph, mode == PhaseMode::Fast ? "fast store barrier insertion" : "global store barrier insertion")
- , m_insertionSet(graph)
- {
- }
-
- bool run()
- {
- if (verbose) {
- dataLog("Starting store barrier insertion:\n");
- m_graph.dump();
- }
-
- switch (mode) {
- case PhaseMode::Fast: {
- DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA);
-
- m_graph.clearEpochs();
- for (BasicBlock* block : m_graph.blocksInNaturalOrder())
- handleBlock(block);
- return true;
- }
-
- case PhaseMode::Global: {
- DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
-
- m_state = std::make_unique<InPlaceAbstractState>(m_graph);
- m_interpreter = std::make_unique<AbstractInterpreter<InPlaceAbstractState>>(m_graph, *m_state);
-
- m_isConverged = false;
-
- // First run the analysis. Inside basic blocks we use an epoch-based analysis that
- // is very precise. At block boundaries, we just propagate which nodes may need a
- // barrier. This gives us a very nice bottom->top fixpoint: we start out assuming
- // that no node needs any barriers at block boundaries, and then we converge
- // towards believing that all nodes need barriers. "Needing a barrier" is like
- // saying that the node is in a past epoch. "Not needing a barrier" is like saying
- // that the node is in the current epoch.
- m_stateAtHead = std::make_unique<BlockMap<HashSet<Node*>>>(m_graph);
- m_stateAtTail = std::make_unique<BlockMap<HashSet<Node*>>>(m_graph);
-
- BlockList postOrder = m_graph.blocksInPostOrder();
-
- bool changed = true;
- while (changed) {
- changed = false;
-
- // Intentional backwards loop because we are using RPO.
- for (unsigned blockIndex = postOrder.size(); blockIndex--;) {
- BasicBlock* block = postOrder[blockIndex];
-
- if (!handleBlock(block)) {
- // If the block didn't finish, then it cannot affect the fixpoint.
- continue;
- }
-
- // Construct the state-at-tail based on the epochs of live nodes and the
- // current epoch. We grow state-at-tail monotonically to ensure convergence.
- bool thisBlockChanged = false;
- for (Node* node : block->ssa->liveAtTail) {
- if (node->epoch() != m_currentEpoch) {
- // If the node is older than the current epoch, then we may need to
- // run a barrier on it in the future. So, add it to the state.
- thisBlockChanged |= m_stateAtTail->at(block).add(node).isNewEntry;
- }
- }
-
- if (!thisBlockChanged) {
- // This iteration didn't learn anything new about this block.
- continue;
- }
-
- // Changed things. Make sure that we loop one more time.
- changed = true;
-
- for (BasicBlock* successor : block->successors()) {
- for (Node* node : m_stateAtTail->at(block))
- m_stateAtHead->at(successor).add(node);
- }
- }
- }
-
- // Tell handleBlock() that it's time to actually insert barriers for real.
- m_isConverged = true;
-
- for (BasicBlock* block : m_graph.blocksInNaturalOrder())
- handleBlock(block);
-
- return true;
- } }
-
- RELEASE_ASSERT_NOT_REACHED();
- return false;
- }
-
-private:
- bool handleBlock(BasicBlock* block)
- {
- if (verbose) {
- dataLog("Dealing with block ", pointerDump(block), "\n");
- if (reallyInsertBarriers())
- dataLog(" Really inserting barriers.\n");
- }
-
- m_currentEpoch = Epoch::first();
-
- if (mode == PhaseMode::Global) {
- if (!block->cfaHasVisited)
- return false;
- m_state->beginBasicBlock(block);
-
- for (Node* node : block->ssa->liveAtHead) {
- if (m_stateAtHead->at(block).contains(node)) {
- // If previous blocks tell us that this node may need a barrier in the
- // future, then put it in the ancient primordial epoch. This forces us to
- // emit a barrier on any possibly-cell store, regardless of the epoch of the
- // stored value.
- node->setEpoch(Epoch());
- } else {
- // If previous blocks aren't requiring us to run a barrier on this node,
- // then put it in the current epoch. This means that we will skip barriers
- // on this node so long as we don't allocate. It also means that we won't
- // run barriers on stores to on one such node into another such node. That's
- // fine, because nodes would be excluded from the state set if at the tails
- // of all predecessors they always had the current epoch.
- node->setEpoch(m_currentEpoch);
- }
- }
- }
-
- bool result = true;
-
- for (m_nodeIndex = 0; m_nodeIndex < block->size(); ++m_nodeIndex) {
- m_node = block->at(m_nodeIndex);
-
- if (verbose) {
- dataLog(
- " ", m_currentEpoch, ": Looking at node ", m_node, " with children: ");
- CommaPrinter comma;
- m_graph.doToChildren(
- m_node,
- [&] (Edge edge) {
- dataLog(comma, edge, " (", edge->epoch(), ")");
- });
- dataLog("\n");
- }
-
- if (mode == PhaseMode::Global) {
- // Execute edges separately because we don't want to insert barriers if the
- // operation doing the store does a check that ensures that the child is not
- // a cell.
- m_interpreter->startExecuting();
- m_interpreter->executeEdges(m_node);
- }
-
- switch (m_node->op()) {
- case PutByValDirect:
- case PutByVal:
- case PutByValAlias: {
- switch (m_node->arrayMode().modeForPut().type()) {
- case Array::Contiguous:
- case Array::ArrayStorage:
- case Array::SlowPutArrayStorage: {
- Edge child1 = m_graph.varArgChild(m_node, 0);
- Edge child3 = m_graph.varArgChild(m_node, 2);
- considerBarrier(child1, child3);
- break;
- }
- default:
- break;
- }
- break;
- }
-
- case ArrayPush: {
- switch (m_node->arrayMode().type()) {
- case Array::Contiguous:
- case Array::ArrayStorage:
- considerBarrier(m_node->child1(), m_node->child2());
- break;
- default:
- break;
- }
- break;
- }
-
- case PutStructure: {
- considerBarrier(m_node->child1());
- break;
- }
-
- case PutClosureVar:
- case PutToArguments:
- case PutById:
- case PutByIdFlush:
- case PutByIdDirect:
- case MultiPutByOffset: {
- considerBarrier(m_node->child1(), m_node->child2());
- break;
- }
-
- case PutByOffset: {
- considerBarrier(m_node->child2(), m_node->child3());
- break;
- }
-
- case PutGlobalVar: {
- considerBarrier(m_node->child1(), m_node->child2());
- break;
- }
-
- default:
- break;
- }
-
- if (doesGC(m_graph, m_node))
- m_currentEpoch.bump();
-
- switch (m_node->op()) {
- case NewObject:
- case NewArray:
- case NewArrayWithSize:
- case NewArrayBuffer:
- case NewTypedArray:
- case NewRegexp:
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- case NewStringObject:
- case MakeRope:
- case CreateActivation:
- case CreateDirectArguments:
- case CreateScopedArguments:
- case CreateClonedArguments:
- case NewFunction:
- // Nodes that allocate get to set their epoch because for those nodes we know
- // that they will be the newest object in the heap.
- m_node->setEpoch(m_currentEpoch);
- break;
-
- case AllocatePropertyStorage:
- case ReallocatePropertyStorage:
- // These allocate but then run their own barrier.
- insertBarrier(m_nodeIndex + 1, m_node->child1().node());
- m_node->setEpoch(Epoch());
- break;
-
- case Upsilon:
- m_node->phi()->setEpoch(m_node->epoch());
- m_node->setEpoch(Epoch());
- break;
-
- default:
- // For nodes that aren't guaranteed to allocate, we say that their return value
- // (if there is one) could be arbitrarily old.
- m_node->setEpoch(Epoch());
- break;
- }
-
- if (verbose) {
- dataLog(
- " ", m_currentEpoch, ": Done with node ", m_node, " (", m_node->epoch(),
- ") with children: ");
- CommaPrinter comma;
- m_graph.doToChildren(
- m_node,
- [&] (Edge edge) {
- dataLog(comma, edge, " (", edge->epoch(), ")");
- });
- dataLog("\n");
- }
-
- if (mode == PhaseMode::Global) {
- if (!m_interpreter->executeEffects(m_nodeIndex, m_node)) {
- result = false;
- break;
- }
- }
- }
-
- if (mode == PhaseMode::Global)
- m_state->reset();
-
- if (reallyInsertBarriers())
- m_insertionSet.execute(block);
-
- return result;
- }
-
- void considerBarrier(Edge base, Edge child)
- {
- if (verbose)
- dataLog(" Considering adding barrier ", base, " => ", child, "\n");
-
- // We don't need a store barrier if the child is guaranteed to not be a cell.
- switch (mode) {
- case PhaseMode::Fast: {
- // Don't try too hard because it's too expensive to run AI.
- if (child->hasConstant()) {
- if (!child->asJSValue().isCell()) {
- if (verbose)
- dataLog(" Rejecting because of constant type.\n");
- return;
- }
- } else {
- switch (child->result()) {
- case NodeResultNumber:
- case NodeResultDouble:
- case NodeResultInt32:
- case NodeResultInt52:
- case NodeResultBoolean:
- if (verbose)
- dataLog(" Rejecting because of result type.\n");
- return;
- default:
- break;
- }
- }
- break;
- }
-
- case PhaseMode::Global: {
- // Go into rage mode to eliminate any chance of a barrier with a non-cell child. We
- // can afford to keep around AI in Global mode.
- if (!m_interpreter->needsTypeCheck(child, ~SpecCell)) {
- if (verbose)
- dataLog(" Rejecting because of AI type.\n");
- return;
- }
- break;
- } }
-
- // We don't need a store barrier if the base is at least as new as the child. For
- // example this won't need a barrier:
- //
- // var o = {}
- // var p = {}
- // p.f = o
- //
- // This is stronger than the currentEpoch rule in considerBarrier(Edge), because it will
- // also eliminate barriers in cases like this:
- //
- // var o = {} // o.epoch = 1, currentEpoch = 1
- // var p = {} // o.epoch = 1, p.epoch = 2, currentEpoch = 2
- // var q = {} // o.epoch = 1, p.epoch = 2, q.epoch = 3, currentEpoch = 3
- // p.f = o // p.epoch >= o.epoch
- //
- // This relationship works because if it holds then we are in one of the following
- // scenarios. Note that we don't know *which* of these scenarios we are in, but it's
- // one of them (though without loss of generality, you can replace "a GC happened" with
- // "many GCs happened").
- //
- // 1) There is no GC between the allocation/last-barrier of base, child and now. Then
- // we definitely don't need a barrier.
- //
- // 2) There was a GC after child was allocated but before base was allocated. Then we
- // don't need a barrier, because base is still a new object.
- //
- // 3) There was a GC after both child and base were allocated. Then they are both old.
- // We don't need barriers on stores of old into old. Note that in this case it
- // doesn't matter if there was also a GC between the allocation of child and base.
- //
- // Note that barriers will lift an object into the current epoch. This is sort of weird.
- // It means that later if you store that object into some other object, and that other
- // object was previously newer object, you'll think that you need a barrier. We could
- // avoid this by tracking allocation epoch and barrier epoch separately. For now I think
- // that this would be overkill. But this does mean that there are the following
- // possibilities when this relationship holds:
- //
- // 4) Base is allocated first. A GC happens and base becomes old. Then we allocate
- // child. (Note that alternatively the GC could happen during the allocation of
- // child.) Then we run a barrier on base. Base will appear to be as new as child
- // (same epoch). At this point, we don't need another barrier on base.
- //
- // 5) Base is allocated first. Then we allocate child. Then we run a GC. Then we run a
- // barrier on base. Base will appear newer than child. We don't need a barrier
- // because both objects are old.
- //
- // Something we watch out for here is that the null epoch is a catch-all for objects
- // allocated before we did any epoch tracking. Two objects being in the null epoch
- // means that we don't know their epoch relationship.
- if (!!base->epoch() && base->epoch() >= child->epoch()) {
- if (verbose)
- dataLog(" Rejecting because of epoch ordering.\n");
- return;
- }
-
- considerBarrier(base);
- }
-
- void considerBarrier(Edge base)
- {
- if (verbose)
- dataLog(" Considering adding barrier on ", base, "\n");
-
- // We don't need a store barrier if the epoch of the base is identical to the current
- // epoch. That means that we either just allocated the object and so it's guaranteed to
- // be in newgen, or we just ran a barrier on it so it's guaranteed to be remembered
- // already.
- if (base->epoch() == m_currentEpoch) {
- if (verbose)
- dataLog(" Rejecting because it's in the current epoch.\n");
- return;
- }
-
- if (verbose)
- dataLog(" Inserting barrier.\n");
- insertBarrier(m_nodeIndex, base.node());
- }
-
- void insertBarrier(unsigned nodeIndex, Node* base)
- {
- // If we're in global mode, we should only insert the barriers once we have converged.
- if (!reallyInsertBarriers())
- return;
-
- // FIXME: We could support StoreBarrier(UntypedUse:). That would be sort of cool.
- // But right now we don't need it.
- m_insertionSet.insertNode(
- nodeIndex, SpecNone, StoreBarrier, m_node->origin, Edge(base, CellUse));
-
- base->setEpoch(m_currentEpoch);
- }
-
- bool reallyInsertBarriers()
- {
- return mode == PhaseMode::Fast || m_isConverged;
- }
-
- InsertionSet m_insertionSet;
- Epoch m_currentEpoch;
- unsigned m_nodeIndex;
- Node* m_node;
-
- // Things we only use in Global mode.
- std::unique_ptr<InPlaceAbstractState> m_state;
- std::unique_ptr<AbstractInterpreter<InPlaceAbstractState>> m_interpreter;
- std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtHead;
- std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtTail;
- bool m_isConverged;
-};
-
-} // anonymous namespace
-
-bool performFastStoreBarrierInsertion(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Fast Store Barrier Insertion Phase");
- return runPhase<StoreBarrierInsertionPhase<PhaseMode::Fast>>(graph);
-}
-
-bool performGlobalStoreBarrierInsertion(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Global Store Barrier Insertion Phase");
- return runPhase<StoreBarrierInsertionPhase<PhaseMode::Global>>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp b/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
index 03515d52a..3aa991c48 100644
--- a/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,14 +28,12 @@
#if ENABLE(DFG_JIT)
-#include "DFGAbstractHeap.h"
-#include "DFGClobberize.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -72,163 +70,73 @@ private:
{
switch (m_node->op()) {
case BitOr:
- handleCommutativity();
-
- if (m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) {
- convertToIdentityOverChild1();
- break;
+ if (m_node->child1()->isConstant()) {
+ JSValue op1 = m_graph.valueOfJSConstant(m_node->child1().node());
+ if (op1.isInt32() && !op1.asInt32()) {
+ convertToIdentityOverChild2();
+ break;
+ }
+ }
+ if (m_node->child2()->isConstant()) {
+ JSValue op2 = m_graph.valueOfJSConstant(m_node->child2().node());
+ if (op2.isInt32() && !op2.asInt32()) {
+ convertToIdentityOverChild1();
+ break;
+ }
}
- break;
-
- case BitXor:
- case BitAnd:
- handleCommutativity();
break;
case BitLShift:
case BitRShift:
case BitURShift:
- if (m_node->child2()->isInt32Constant() && !(m_node->child2()->asInt32() & 0x1f)) {
- convertToIdentityOverChild1();
- break;
- }
- break;
-
- case UInt32ToNumber:
- if (m_node->child1()->op() == BitURShift
- && m_node->child1()->child2()->isInt32Constant()
- && (m_node->child1()->child2()->asInt32() & 0x1f)
- && m_node->arithMode() != Arith::DoOverflow) {
- m_node->convertToIdentity();
- m_changed = true;
- break;
- }
- break;
-
- case ArithAdd:
- handleCommutativity();
-
- if (m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) {
- convertToIdentityOverChild1();
- break;
- }
- break;
-
- case ArithMul:
- handleCommutativity();
- break;
-
- case ArithSub:
- if (m_node->child2()->isInt32Constant()
- && m_node->isBinaryUseKind(Int32Use)) {
- int32_t value = m_node->child2()->asInt32();
- if (-value != value) {
- m_node->setOp(ArithAdd);
- m_node->child2().setNode(
- m_insertionSet.insertConstant(
- m_nodeIndex, m_node->origin, jsNumber(-value)));
- m_changed = true;
- break;
- }
- }
- break;
-
- case ArithPow:
- if (m_node->child2()->isNumberConstant()) {
- double yOperandValue = m_node->child2()->asNumber();
- if (yOperandValue == 1) {
+ if (m_node->child2()->isConstant()) {
+ JSValue op2 = m_graph.valueOfJSConstant(m_node->child2().node());
+ if (op2.isInt32() && !(op2.asInt32() & 0x1f)) {
convertToIdentityOverChild1();
- } else if (yOperandValue == 0.5) {
- m_insertionSet.insertCheck(m_nodeIndex, m_node);
- m_node->convertToArithSqrt();
- m_changed = true;
+ break;
}
}
break;
-
- case ValueRep:
- case Int52Rep:
- case DoubleRep: {
- // This short-circuits circuitous conversions, like ValueRep(DoubleRep(value)) or
- // even more complicated things. Like, it can handle a beast like
- // ValueRep(DoubleRep(Int52Rep(value))).
- // The only speculation that we would do beyond validating that we have a type that
- // can be represented a certain way is an Int32 check that would appear on Int52Rep
- // nodes. For now, if we see this and the final type we want is an Int52, we use it
- // as an excuse not to fold. The only thing we would need is a Int52RepInt32Use kind.
- bool hadInt32Check = false;
- if (m_node->op() == Int52Rep) {
- if (m_node->child1().useKind() != Int32Use)
- break;
- hadInt32Check = true;
- }
- for (Node* node = m_node->child1().node(); ; node = node->child1().node()) {
- if (canonicalResultRepresentation(node->result()) ==
- canonicalResultRepresentation(m_node->result())) {
- m_insertionSet.insertCheck(m_nodeIndex, m_node);
- if (hadInt32Check) {
- // FIXME: Consider adding Int52RepInt32Use or even DoubleRepInt32Use,
- // which would be super weird. The latter would only arise in some
- // seriously circuitous conversions.
- if (canonicalResultRepresentation(node->result()) != NodeResultJS)
- break;
-
- m_insertionSet.insertCheck(
- m_nodeIndex, m_node->origin, Edge(node, Int32Use));
- }
- m_node->child1() = node->defaultEdge();
+ case UInt32ToNumber:
+ if (m_node->child1()->op() == BitURShift
+ && m_node->child1()->child2()->isConstant()) {
+ JSValue shiftAmount = m_graph.valueOfJSConstant(
+ m_node->child1()->child2().node());
+ if (shiftAmount.isInt32() && (shiftAmount.asInt32() & 0x1f)) {
m_node->convertToIdentity();
m_changed = true;
break;
}
-
- switch (node->op()) {
- case Int52Rep:
- if (node->child1().useKind() != Int32Use)
- break;
- hadInt32Check = true;
- continue;
-
- case DoubleRep:
- case ValueRep:
- continue;
-
- default:
- break;
- }
- break;
}
break;
- }
- case Flush: {
- ASSERT(m_graph.m_form != SSA);
+ case GetArrayLength:
+ if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node))
+ foldTypedArrayPropertyToConstant(view, jsNumber(view->length()));
+ break;
- Node* setLocal = nullptr;
- VirtualRegister local = m_node->local();
+ case GetTypedArrayByteOffset:
+ if (JSArrayBufferView* view = m_graph.tryGetFoldableView(m_node->child1().node()))
+ foldTypedArrayPropertyToConstant(view, jsNumber(view->byteOffset()));
+ break;
- for (unsigned i = m_nodeIndex; i--;) {
- Node* node = m_block->at(i);
- if (node->op() == SetLocal && node->local() == local) {
- setLocal = node;
+ case GetIndexedPropertyStorage:
+ if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node)) {
+ if (view->mode() != FastTypedArray) {
+ prepareToFoldTypedArray(view);
+ m_node->convertToConstantStoragePointer(view->vector());
+ m_changed = true;
break;
+ } else {
+ // FIXME: It would be awesome to be able to fold the property storage for
+ // these GC-allocated typed arrays. For now it doesn't matter because the
+ // most common use-cases for constant typed arrays involve large arrays with
+ // aliased buffer views.
+ // https://bugs.webkit.org/show_bug.cgi?id=125425
}
- if (accessesOverlap(m_graph, node, AbstractHeap(Stack, local)))
- break;
}
-
- if (!setLocal)
- break;
-
- // The Flush should become a PhantomLocal at this point. This means that we want the
- // local's value during OSR, but we don't care if the value is stored to the stack. CPS
- // rethreading can canonicalize PhantomLocals for us.
- m_node->convertFlushToPhantomLocal();
- m_graph.dethread();
- m_changed = true;
break;
- }
default:
break;
@@ -237,7 +145,8 @@ private:
void convertToIdentityOverChild(unsigned childIndex)
{
- m_insertionSet.insertCheck(m_nodeIndex, m_node);
+ m_insertionSet.insertNode(
+ m_nodeIndex, SpecNone, Phantom, m_node->codeOrigin, m_node->children);
m_node->children.removeEdge(childIndex ^ 1);
m_node->convertToIdentity();
m_changed = true;
@@ -253,26 +162,20 @@ private:
convertToIdentityOverChild(1);
}
- void handleCommutativity()
+ void foldTypedArrayPropertyToConstant(JSArrayBufferView* view, JSValue constant)
{
- // If the right side is a constant then there is nothing left to do.
- if (m_node->child2()->hasConstant())
- return;
-
- // This case ensures that optimizations that look for x + const don't also have
- // to look for const + x.
- if (m_node->child1()->hasConstant()) {
- std::swap(m_node->child1(), m_node->child2());
- m_changed = true;
- return;
- }
-
- // This case ensures that CSE is commutativity-aware.
- if (m_node->child1().node() > m_node->child2().node()) {
- std::swap(m_node->child1(), m_node->child2());
- m_changed = true;
- return;
- }
+ prepareToFoldTypedArray(view);
+ m_graph.convertToConstant(m_node, constant);
+ m_changed = true;
+ }
+
+ void prepareToFoldTypedArray(JSArrayBufferView* view)
+ {
+ m_insertionSet.insertNode(
+ m_nodeIndex, SpecNone, TypedArrayWatchpoint, m_node->codeOrigin,
+ OpInfo(view));
+ m_insertionSet.insertNode(
+ m_nodeIndex, SpecNone, Phantom, m_node->codeOrigin, m_node->children);
}
InsertionSet m_insertionSet;
diff --git a/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.cpp b/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.cpp
deleted file mode 100644
index f77f06a74..000000000
--- a/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.cpp
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGStructureAbstractValue.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-
-namespace JSC { namespace DFG {
-
-// Comment out the empty SAMPLE() definition, and uncomment the one that uses SamplingRegion, if
-// you want extremely fine-grained profiling in this code.
-#define SAMPLE(name)
-//#define SAMPLE(name) SamplingRegion samplingRegion(name)
-
-#if !ASSERT_DISABLED
-void StructureAbstractValue::assertIsRegistered(Graph& graph) const
-{
- SAMPLE("StructureAbstractValue assertIsRegistered");
-
- if (isTop())
- return;
-
- for (unsigned i = size(); i--;)
- graph.assertIsRegistered(at(i));
-}
-#endif // !ASSERT_DISABLED
-
-void StructureAbstractValue::clobber()
-{
- SAMPLE("StructureAbstractValue clobber");
-
- // The premise of this approach to clobbering is that anytime we introduce
- // a watchable structure into an abstract value, we watchpoint it. You can assert
- // that this holds by calling assertIsWatched().
-
- if (isTop())
- return;
-
- setClobbered(true);
-
- if (m_set.isThin()) {
- if (!m_set.singleEntry())
- return;
- if (!m_set.singleEntry()->dfgShouldWatch())
- makeTopWhenThin();
- return;
- }
-
- StructureSet::OutOfLineList* list = m_set.list();
- for (unsigned i = list->m_length; i--;) {
- if (!list->list()[i]->dfgShouldWatch()) {
- makeTop();
- return;
- }
- }
-}
-
-void StructureAbstractValue::observeTransition(Structure* from, Structure* to)
-{
- SAMPLE("StructureAbstractValue observeTransition");
-
- ASSERT(!from->dfgShouldWatch());
-
- if (isTop())
- return;
-
- if (!m_set.contains(from))
- return;
-
- if (!m_set.add(to))
- return;
-
- if (m_set.size() > polymorphismLimit)
- makeTop();
-}
-
-void StructureAbstractValue::observeTransitions(const TransitionVector& vector)
-{
- SAMPLE("StructureAbstractValue observeTransitions");
-
- if (isTop())
- return;
-
- StructureSet newStructures;
- for (unsigned i = vector.size(); i--;) {
- ASSERT(!vector[i].previous->dfgShouldWatch());
-
- if (!m_set.contains(vector[i].previous))
- continue;
-
- newStructures.add(vector[i].next);
- }
-
- if (!m_set.merge(newStructures))
- return;
-
- if (m_set.size() > polymorphismLimit)
- makeTop();
-}
-
-bool StructureAbstractValue::add(Structure* structure)
-{
- SAMPLE("StructureAbstractValue add");
-
- if (isTop())
- return false;
-
- if (!m_set.add(structure))
- return false;
-
- if (m_set.size() > polymorphismLimit)
- makeTop();
-
- return true;
-}
-
-bool StructureAbstractValue::merge(const StructureSet& other)
-{
- SAMPLE("StructureAbstractValue merge set");
-
- if (isTop())
- return false;
-
- return mergeNotTop(other);
-}
-
-bool StructureAbstractValue::mergeSlow(const StructureAbstractValue& other)
-{
- SAMPLE("StructureAbstractValue merge value slow");
-
- // It isn't immediately obvious that the code below is doing the right thing, so let's go
- // through it.
- //
- // This not clobbered, other not clobbered: Clearly, we don't want to make anything clobbered
- // since we just have two sets and we are merging them. mergeNotTop() can handle this just
- // fine.
- //
- // This clobbered, other clobbered: Clobbered means that we have a set of things, plus we
- // temporarily have the set of all things but the latter will go away once we hit the next
- // invalidation point. This allows us to merge two clobbered sets the natural way. For now
- // the set will still be TOP (and so we keep the clobbered bit set), but we know that after
- // invalidation, we will have the union of the this and other.
- //
- // This clobbered, other not clobbered: It's safe to merge in other for both before and after
- // invalidation, so long as we leave the clobbered bit set. Before invalidation this has no
- // effect since the set will still appear to have all things in it. The way to think about
- // what invalidation would do is imagine if we had a set A that was clobbered and a set B
- // that wasn't and we considered the following two cases. Note that we expect A to be the
- // same at the end in both cases:
- //
- // A.merge(B) InvalidationPoint
- // InvalidationPoint A.merge(B)
- //
- // The fact that we expect A to be the same in both cases means that we want to merge other
- // into this but keep the clobbered bit.
- //
- // This not clobbered, other clobbered: This is just the converse of the previous case. We
- // want to merge other into this and set the clobbered bit.
-
- bool changed = false;
-
- if (!isClobbered() && other.isClobbered()) {
- setClobbered(true);
- changed = true;
- }
-
- changed |= mergeNotTop(other.m_set);
-
- return changed;
-}
-
-bool StructureAbstractValue::mergeNotTop(const StructureSet& other)
-{
- SAMPLE("StructureAbstractValue merge not top");
-
- if (!m_set.merge(other))
- return false;
-
- if (m_set.size() > polymorphismLimit)
- makeTop();
-
- return true;
-}
-
-void StructureAbstractValue::filter(const StructureSet& other)
-{
- SAMPLE("StructureAbstractValue filter set");
-
- if (isTop()) {
- m_set = other;
- return;
- }
-
- if (isClobbered()) {
- // We have two choices here:
- //
- // Do nothing: It's legal to keep our set intact, which would essentially mean that for
- // now, our set would behave like TOP but after the next invalidation point it wold be
- // a finite set again. This may be a good choice if 'other' is much bigger than our
- // m_set.
- //
- // Replace m_set with other and clear the clobber bit: This is also legal, and means that
- // we're no longer clobbered. This is usually better because it immediately gives us a
- // smaller set.
- //
- // This scenario should come up rarely. We usually don't do anything to an abstract value
- // after it is clobbered. But we apply some heuristics.
-
- if (other.size() > m_set.size() + clobberedSupremacyThreshold)
- return; // Keep the clobbered set.
-
- m_set = other;
- setClobbered(false);
- return;
- }
-
- m_set.filter(other);
-}
-
-void StructureAbstractValue::filter(const StructureAbstractValue& other)
-{
- SAMPLE("StructureAbstractValue filter value");
-
- if (other.isTop())
- return;
-
- if (other.isClobbered()) {
- if (isTop())
- return;
-
- if (!isClobbered()) {
- // See justification in filter(const StructureSet&), above. An unclobbered set is
- // almost always better.
- if (m_set.size() > other.m_set.size() + clobberedSupremacyThreshold)
- *this = other; // Keep the clobbered set.
- return;
- }
-
- m_set.filter(other.m_set);
- return;
- }
-
- filter(other.m_set);
-}
-
-void StructureAbstractValue::filterSlow(SpeculatedType type)
-{
- SAMPLE("StructureAbstractValue filter type slow");
-
- if (!(type & SpecCell)) {
- clear();
- return;
- }
-
- ASSERT(!isTop());
-
- m_set.genericFilter(
- [&] (Structure* structure) {
- return !!(speculationFromStructure(structure) & type);
- });
-}
-
-bool StructureAbstractValue::contains(Structure* structure) const
-{
- SAMPLE("StructureAbstractValue contains");
-
- if (isInfinite())
- return true;
-
- return m_set.contains(structure);
-}
-
-bool StructureAbstractValue::isSubsetOf(const StructureSet& other) const
-{
- SAMPLE("StructureAbstractValue isSubsetOf set");
-
- if (isInfinite())
- return false;
-
- return m_set.isSubsetOf(other);
-}
-
-bool StructureAbstractValue::isSubsetOf(const StructureAbstractValue& other) const
-{
- SAMPLE("StructureAbstractValue isSubsetOf value");
-
- if (isTop())
- return false;
-
- if (other.isTop())
- return true;
-
- if (isClobbered() == other.isClobbered())
- return m_set.isSubsetOf(other.m_set);
-
- // Here it gets tricky. If in doubt, return false!
-
- if (isClobbered())
- return false; // A clobbered set is never a subset of an unclobbered set.
-
- // An unclobbered set is currently a subset of a clobbered set, but it may not be so after
- // invalidation.
- return m_set.isSubsetOf(other.m_set);
-}
-
-bool StructureAbstractValue::isSupersetOf(const StructureSet& other) const
-{
- SAMPLE("StructureAbstractValue isSupersetOf set");
-
- if (isInfinite())
- return true;
-
- return m_set.isSupersetOf(other);
-}
-
-bool StructureAbstractValue::overlaps(const StructureSet& other) const
-{
- SAMPLE("StructureAbstractValue overlaps set");
-
- if (isInfinite())
- return true;
-
- return m_set.overlaps(other);
-}
-
-bool StructureAbstractValue::overlaps(const StructureAbstractValue& other) const
-{
- SAMPLE("StructureAbstractValue overlaps value");
-
- if (other.isInfinite())
- return true;
-
- return overlaps(other.m_set);
-}
-
-bool StructureAbstractValue::equalsSlow(const StructureAbstractValue& other) const
-{
- SAMPLE("StructureAbstractValue equalsSlow");
-
- ASSERT(m_set.m_pointer != other.m_set.m_pointer);
- ASSERT(!isTop());
- ASSERT(!other.isTop());
-
- return m_set == other.m_set
- && isClobbered() == other.isClobbered();
-}
-
-void StructureAbstractValue::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- if (isClobbered())
- out.print("Clobbered:");
-
- if (isTop())
- out.print("TOP");
- else
- out.print(inContext(m_set, context));
-}
-
-void StructureAbstractValue::dump(PrintStream& out) const
-{
- dumpInContext(out, 0);
-}
-
-void StructureAbstractValue::validateReferences(const TrackedReferences& trackedReferences) const
-{
- if (isTop())
- return;
- m_set.validateReferences(trackedReferences);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h b/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h
index 16f59ab11..54d3bd29b 100644
--- a/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h
+++ b/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,234 +26,313 @@
#ifndef DFGStructureAbstractValue_h
#define DFGStructureAbstractValue_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include "DFGTransition.h"
#include "JSCell.h"
#include "SpeculatedType.h"
#include "DumpContext.h"
#include "StructureSet.h"
-namespace JSC {
-
-class TrackedReferences;
-
-namespace DFG {
+namespace JSC { namespace DFG {
class StructureAbstractValue {
public:
- StructureAbstractValue() { }
- StructureAbstractValue(Structure* structure)
- : m_set(StructureSet(structure))
+ StructureAbstractValue()
+ : m_structure(0)
{
- setClobbered(false);
}
- StructureAbstractValue(const StructureSet& other)
- : m_set(other)
+
+ StructureAbstractValue(Structure* structure)
+ : m_structure(structure)
{
- setClobbered(false);
}
- ALWAYS_INLINE StructureAbstractValue(const StructureAbstractValue& other)
- : m_set(other.m_set)
+
+ StructureAbstractValue(const StructureSet& set)
{
- setClobbered(other.isClobbered());
+ switch (set.size()) {
+ case 0:
+ m_structure = 0;
+ break;
+
+ case 1:
+ m_structure = set[0];
+ break;
+
+ default:
+ m_structure = topValue();
+ break;
+ }
}
- ALWAYS_INLINE StructureAbstractValue& operator=(Structure* structure)
+ void clear()
{
- m_set = StructureSet(structure);
- setClobbered(false);
- return *this;
+ m_structure = 0;
}
- ALWAYS_INLINE StructureAbstractValue& operator=(const StructureSet& other)
+
+ void makeTop()
{
- m_set = other;
- setClobbered(false);
- return *this;
+ m_structure = topValue();
}
- ALWAYS_INLINE StructureAbstractValue& operator=(const StructureAbstractValue& other)
+
+ static StructureAbstractValue top()
{
- m_set = other.m_set;
- setClobbered(other.isClobbered());
- return *this;
+ StructureAbstractValue value;
+ value.makeTop();
+ return value;
}
- void clear()
+ void add(Structure* structure)
{
- m_set.clear();
- setClobbered(false);
+ ASSERT(!contains(structure) && !isTop());
+ if (m_structure)
+ makeTop();
+ else
+ m_structure = structure;
}
- void makeTop()
+ bool addAll(const StructureSet& other)
{
- m_set.deleteListIfNecessary();
- m_set.m_pointer = topValue;
+ if (isTop() || !other.size())
+ return false;
+ if (other.size() > 1) {
+ makeTop();
+ return true;
+ }
+ if (!m_structure) {
+ m_structure = other[0];
+ return true;
+ }
+ if (m_structure == other[0])
+ return false;
+ makeTop();
+ return true;
}
-#if ASSERT_DISABLED
- void assertIsRegistered(Graph&) const { }
-#else
- void assertIsRegistered(Graph&) const;
-#endif
-
- void clobber();
- void observeInvalidationPoint() { setClobbered(false); }
-
- void observeTransition(Structure* from, Structure* to);
- void observeTransitions(const TransitionVector&);
-
- static StructureAbstractValue top()
+ bool addAll(const StructureAbstractValue& other)
{
- StructureAbstractValue result;
- result.m_set.m_pointer = topValue;
- return result;
+ if (!other.m_structure)
+ return false;
+ if (isTop())
+ return false;
+ if (other.isTop()) {
+ makeTop();
+ return true;
+ }
+ if (m_structure) {
+ if (m_structure == other.m_structure)
+ return false;
+ makeTop();
+ return true;
+ }
+ m_structure = other.m_structure;
+ return true;
}
- bool isClear() const { return m_set.isEmpty(); }
- bool isTop() const { return m_set.m_pointer == topValue; }
- bool isNeitherClearNorTop() const { return !isClear() && !isTop(); }
-
- // A clobbered abstract value means that the set currently contains the m_set set of
- // structures plus TOP, except that the "plus TOP" will go away at the next invalidation
- // point. Note that it's tempting to think of this as "the set of structures in m_set plus
- // the set of structures transition-reachable from m_set" - but this isn't really correct,
- // since if we add an unwatchable structure after clobbering, the two definitions are not
- // equivalent. If we do this, the new unwatchable structure will be added to m_set.
- // Invalidation points do not try to "clip" the set of transition-reachable structures from
- // m_set by looking at reachability as this would mean that the new set is TOP. Instead they
- // literally assume that the set is just m_set rather than m_set plus TOP.
- bool isClobbered() const { return m_set.getReservedFlag(); }
-
- // A finite structure abstract value is one where enumerating over it will yield all
- // of the structures that the value may have right now. This is true so long as we're
- // neither top nor clobbered.
- bool isFinite() const { return !isTop() && !isClobbered(); }
-
- // An infinite structure abstract value may currently have any structure.
- bool isInfinite() const { return !isFinite(); }
+ bool contains(Structure* structure) const
+ {
+ if (isTop())
+ return true;
+ if (m_structure == structure)
+ return true;
+ return false;
+ }
- bool add(Structure* structure);
+ bool isSubsetOf(const StructureSet& other) const
+ {
+ if (isTop())
+ return false;
+ if (!m_structure)
+ return true;
+ return other.contains(m_structure);
+ }
- bool merge(const StructureSet& other);
+ bool doesNotContainAnyOtherThan(Structure* structure) const
+ {
+ if (isTop())
+ return false;
+ if (!m_structure)
+ return true;
+ return m_structure == structure;
+ }
- ALWAYS_INLINE bool merge(const StructureAbstractValue& other)
+ bool isSupersetOf(const StructureSet& other) const
{
- if (other.isClear())
+ if (isTop())
+ return true;
+ if (!other.size())
+ return true;
+ if (other.size() > 1)
return false;
-
+ return m_structure == other[0];
+ }
+
+ bool isSubsetOf(const StructureAbstractValue& other) const
+ {
+ if (other.isTop())
+ return true;
if (isTop())
return false;
+ if (m_structure) {
+ if (other.m_structure)
+ return m_structure == other.m_structure;
+ return false;
+ }
+ return true;
+ }
+
+ bool isSupersetOf(const StructureAbstractValue& other) const
+ {
+ return other.isSubsetOf(*this);
+ }
+
+ void filter(const StructureSet& other)
+ {
+ if (!m_structure)
+ return;
- if (other.isTop()) {
- makeTop();
- return true;
+ if (isTop()) {
+ switch (other.size()) {
+ case 0:
+ m_structure = 0;
+ return;
+
+ case 1:
+ m_structure = other[0];
+ return;
+
+ default:
+ return;
+ }
}
- return mergeSlow(other);
+ if (other.contains(m_structure))
+ return;
+
+ m_structure = 0;
}
- void filter(const StructureSet& other);
- void filter(const StructureAbstractValue& other);
-
- ALWAYS_INLINE void filter(SpeculatedType type)
+ void filter(const StructureAbstractValue& other)
{
- if (!(type & SpecCell)) {
- clear();
+ if (isTop()) {
+ m_structure = other.m_structure;
return;
}
- if (isNeitherClearNorTop())
- filterSlow(type);
+ if (m_structure == other.m_structure)
+ return;
+ if (other.isTop())
+ return;
+ m_structure = 0;
}
- ALWAYS_INLINE bool operator==(const StructureAbstractValue& other) const
+ void filter(SpeculatedType other)
{
- if ((m_set.isThin() && other.m_set.isThin()) || isTop() || other.isTop())
- return m_set.m_pointer == other.m_set.m_pointer;
+ if (!(other & SpecCell)) {
+ clear();
+ return;
+ }
- return equalsSlow(other);
+ if (isClearOrTop())
+ return;
+
+ if (!(speculationFromStructure(m_structure) & other))
+ m_structure = 0;
}
- const StructureSet& set() const
+ bool isClear() const
{
- ASSERT(!isTop());
- return m_set;
+ return !m_structure;
}
+ bool isTop() const { return m_structure == topValue(); }
+
+ bool isClearOrTop() const { return m_structure <= topValue(); }
+ bool isNeitherClearNorTop() const { return !isClearOrTop(); }
+
size_t size() const
{
ASSERT(!isTop());
- return m_set.size();
+ return !!m_structure;
}
Structure* at(size_t i) const
{
ASSERT(!isTop());
- return m_set.at(i);
+ ASSERT(m_structure);
+ ASSERT_UNUSED(i, !i);
+ return m_structure;
}
- Structure* operator[](size_t i) const { return at(i); }
-
- // In most cases, what you really want to do is verify whether the set is top or clobbered, and
- // if not, enumerate the set of structures. Use this only in cases where the singleton case is
- // meaningfully special, like for transitions.
- Structure* onlyStructure() const
+ Structure* operator[](size_t i) const
{
- if (isInfinite())
- return nullptr;
- return m_set.onlyStructure();
+ return at(i);
}
- void dumpInContext(PrintStream&, DumpContext*) const;
- void dump(PrintStream&) const;
-
- // The methods below are all conservative and err on the side of making 'this' appear bigger
- // than it is. For example, contains() may return true if the set is clobbered or TOP.
- // isSubsetOf() may return false in case of ambiguities. Therefore you should only perform
- // optimizations as a consequence of the "this is smaller" return value - so false for
- // contains(), true for isSubsetOf(), false for isSupersetOf(), and false for overlaps().
-
- bool contains(Structure* structure) const;
-
- bool isSubsetOf(const StructureSet& other) const;
- bool isSubsetOf(const StructureAbstractValue& other) const;
-
- bool isSupersetOf(const StructureSet& other) const;
- bool isSupersetOf(const StructureAbstractValue& other) const
+ Structure* last() const
{
- return other.isSubsetOf(*this);
+ return at(0);
}
- bool overlaps(const StructureSet& other) const;
- bool overlaps(const StructureAbstractValue& other) const;
-
- void validateReferences(const TrackedReferences&) const;
-
-private:
- static const uintptr_t clobberedFlag = StructureSet::reservedFlag;
- static const uintptr_t topValue = StructureSet::reservedValue;
- static const unsigned polymorphismLimit = 10;
- static const unsigned clobberedSupremacyThreshold = 2;
+ SpeculatedType speculationFromStructures() const
+ {
+ if (isTop())
+ return SpecCell;
+ if (isClear())
+ return SpecNone;
+ return speculationFromStructure(m_structure);
+ }
- void filterSlow(SpeculatedType type);
- bool mergeSlow(const StructureAbstractValue& other);
+ bool isValidOffset(PropertyOffset offset)
+ {
+ if (isTop())
+ return false;
+ if (isClear())
+ return true;
+ return m_structure->isValidOffset(offset);
+ }
- bool equalsSlow(const StructureAbstractValue& other) const;
+ bool hasSingleton() const
+ {
+ return isNeitherClearNorTop();
+ }
- void makeTopWhenThin()
+ Structure* singleton() const
{
- ASSERT(m_set.isThin());
- m_set.m_pointer = topValue;
+ ASSERT(isNeitherClearNorTop());
+ return m_structure;
}
- bool mergeNotTop(const StructureSet& other);
+ bool operator==(const StructureAbstractValue& other) const
+ {
+ return m_structure == other.m_structure;
+ }
- void setClobbered(bool clobbered)
+ void dumpInContext(PrintStream& out, DumpContext* context) const
{
- ASSERT(!isTop() || !clobbered);
- m_set.setReservedFlag(clobbered);
+ if (isTop()) {
+ out.print("TOP");
+ return;
+ }
+
+ out.print("[");
+ if (m_structure)
+ out.print(inContext(*m_structure, context));
+ out.print("]");
+ }
+
+ void dump(PrintStream& out) const
+ {
+ dumpInContext(out, 0);
}
+
+private:
+ static Structure* topValue() { return reinterpret_cast<Structure*>(1); }
+
+ // NB. This must have a trivial destructor.
- StructureSet m_set;
+ // This can only remember one structure at a time.
+ Structure* m_structure;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGStructureClobberState.h b/Source/JavaScriptCore/dfg/DFGStructureClobberState.h
deleted file mode 100644
index ac4275af3..000000000
--- a/Source/JavaScriptCore/dfg/DFGStructureClobberState.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGStructureClobberState_h
-#define DFGStructureClobberState_h
-
-#if ENABLE(DFG_JIT)
-
-#include <wtf/PrintStream.h>
-
-namespace JSC { namespace DFG {
-
-enum StructureClobberState {
- StructuresAreWatched, // Constants with watchable structures must have those structures.
- StructuresAreClobbered // Constants with watchable structures could have any structure.
-};
-
-inline StructureClobberState merge(StructureClobberState a, StructureClobberState b)
-{
- switch (a) {
- case StructuresAreWatched:
- return b;
- case StructuresAreClobbered:
- return StructuresAreClobbered;
- }
- RELEASE_ASSERT_NOT_REACHED();
- return StructuresAreClobbered;
-}
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-inline void printInternal(PrintStream& out, JSC::DFG::StructureClobberState state)
-{
- switch (state) {
- case JSC::DFG::StructuresAreWatched:
- out.print("StructuresAreWatched");
- return;
- case JSC::DFG::StructuresAreClobbered:
- out.print("StructuresAreClobbered");
- return;
- }
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGStructureClobberState_h
diff --git a/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.cpp b/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.cpp
deleted file mode 100644
index ac9273cdb..000000000
--- a/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.cpp
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGStructureRegistrationPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGBasicBlockInlines.h"
-#include "DFGGraph.h"
-#include "DFGPhase.h"
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-class StructureRegistrationPhase : public Phase {
-public:
- StructureRegistrationPhase(Graph& graph)
- : Phase(graph, "structure registration")
- {
- }
-
- bool run()
- {
- // FIXME: This phase shouldn't exist. We should have registered all structures by now, since
- // we may already have done optimizations that rely on structures having been registered.
- // Currently, we still have places where we don't register structures prior to this phase,
- // but structures don't end up being used for optimization prior to this phase. That's a
- // pretty fragile situation and we should fix it eventually.
- // https://bugs.webkit.org/show_bug.cgi?id=147889
-
- // We need to set this before this phase finishes. This phase doesn't do anything
- // conditioned on this field, except for assertIsRegistered() below. We intend for that
- // method to behave as if the phase was already finished. So, we set this up here.
- m_graph.m_structureRegistrationState = AllStructuresAreRegistered;
-
- // These are pretty dumb, but needed to placate subsequent assertions. We don't actually
- // have to watch these because there is no way to transition away from it, but they are
- // watchable and so we will assert if they aren't watched.
- registerStructure(m_graph.m_vm.structureStructure.get());
- registerStructure(m_graph.m_vm.stringStructure.get());
- registerStructure(m_graph.m_vm.getterSetterStructure.get());
-
- for (FrozenValue* value : m_graph.m_frozenValues)
- assertIsRegistered(value->structure());
-
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
-
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
-
- switch (node->op()) {
- case CheckStructure:
- assertAreRegistered(node->structureSet());
- break;
-
- case NewObject:
- case ArrayifyToStructure:
- case NewStringObject:
- registerStructure(node->structure());
- break;
-
- case PutStructure:
- case AllocatePropertyStorage:
- case ReallocatePropertyStorage:
- registerStructure(node->transition()->previous);
- registerStructure(node->transition()->next);
- break;
-
- case MultiGetByOffset:
- for (const MultiGetByOffsetCase& getCase : node->multiGetByOffsetData().cases)
- registerStructures(getCase.set());
- break;
-
- case MultiPutByOffset:
- for (unsigned i = node->multiPutByOffsetData().variants.size(); i--;) {
- PutByIdVariant& variant = node->multiPutByOffsetData().variants[i];
- registerStructures(variant.oldStructure());
- if (variant.kind() == PutByIdVariant::Transition)
- registerStructure(variant.newStructure());
- }
- break;
-
- case NewArray:
- case NewArrayBuffer:
- registerStructure(m_graph.globalObjectFor(node->origin.semantic)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
- break;
-
- case NewTypedArray:
- registerStructure(m_graph.globalObjectFor(node->origin.semantic)->typedArrayStructure(node->typedArrayType()));
- break;
-
- case ToString:
- case CallStringConstructor:
- registerStructure(m_graph.globalObjectFor(node->origin.semantic)->stringObjectStructure());
- break;
-
- case CreateActivation:
- registerStructure(m_graph.globalObjectFor(node->origin.semantic)->activationStructure());
- break;
-
- case CreateDirectArguments:
- registerStructure(m_graph.globalObjectFor(node->origin.semantic)->directArgumentsStructure());
- break;
-
- case CreateScopedArguments:
- registerStructure(m_graph.globalObjectFor(node->origin.semantic)->scopedArgumentsStructure());
- break;
-
- case NewRegexp:
- registerStructure(m_graph.globalObjectFor(node->origin.semantic)->regExpStructure());
- break;
-
- case NewFunction:
- registerStructure(m_graph.globalObjectFor(node->origin.semantic)->functionStructure());
- break;
-
- default:
- break;
- }
- }
- }
-
- return true;
- }
-
-private:
- void registerStructures(const StructureSet& set)
- {
- for (Structure* structure : set)
- registerStructure(structure);
- }
-
- void registerStructure(Structure* structure)
- {
- if (structure)
- m_graph.registerStructure(structure);
- }
-
- void assertAreRegistered(const StructureSet& set)
- {
- for (Structure* structure : set)
- assertIsRegistered(structure);
- }
-
- void assertIsRegistered(Structure* structure)
- {
- if (structure)
- m_graph.assertIsRegistered(structure);
- }
-};
-
-bool performStructureRegistration(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Structure Registration Phase");
- return runPhase<StructureRegistrationPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.h b/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.h
deleted file mode 100644
index bba789164..000000000
--- a/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGStructureRegistrationPhase_h
-#define DFGStructureRegistrationPhase_h
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-class Graph;
-
-// Registers any structures we know about as weak references, and sets watchpoints on any
-// such structures that we know of that are currently watchable. It's somewhat
-// counterintuitive, but this ends up being the cleanest and most effective way of reducing
-// structure checks on terminal structures:
-//
-// - We used to only set watchpoints on watchable structures if we knew that this would
-// remove a structure check. Experiments show that switching from that, to blindly
-// setting watchpoints on all watchable structures, was not a regression.
-//
-// - It makes abstract interpretation a whole lot easier. We just assume that watchable
-// structures are unclobberable without having to do any other logic.
-
-bool performStructureRegistration(Graph&);
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGStructureRegistrationPhase_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGThreadData.cpp b/Source/JavaScriptCore/dfg/DFGThreadData.cpp
deleted file mode 100644
index d605b0092..000000000
--- a/Source/JavaScriptCore/dfg/DFGThreadData.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGThreadData.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "JSCInlines.h"
-
-namespace JSC { namespace DFG {
-
-ThreadData::ThreadData(Worklist* worklist)
- : m_worklist(worklist)
- , m_identifier(0)
- , m_safepoint(nullptr)
-{
-}
-
-ThreadData::~ThreadData()
-{
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp
index 560aedf6e..c0935b95a 100644
--- a/Source/JavaScriptCore/dfg/DFGThunks.cpp
+++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,9 +32,7 @@
#include "DFGOSRExitCompiler.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
-#include "LinkBuffer.h"
#include "MacroAssembler.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
@@ -88,53 +86,13 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
jit.jump(MacroAssembler::AbsoluteAddress(&vm->osrExitJumpDestination));
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
patchBuffer.link(functionCall, compileOSRExit);
return FINALIZE_CODE(patchBuffer, ("DFG OSR exit generation thunk"));
}
-MacroAssemblerCodeRef osrEntryThunkGenerator(VM* vm)
-{
- MacroAssembler jit;
-
- // We get passed the address of a scratch buffer. The first 8-byte slot of the buffer
- // is the frame size. The second 8-byte slot is the pointer to where we are supposed to
- // jump. The remaining bytes are the new call frame header followed by the locals.
-
- ptrdiff_t offsetOfFrameSize = 0; // This is the DFG frame count.
- ptrdiff_t offsetOfTargetPC = offsetOfFrameSize + sizeof(EncodedJSValue);
- ptrdiff_t offsetOfPayload = offsetOfTargetPC + sizeof(EncodedJSValue);
- ptrdiff_t offsetOfLocals = offsetOfPayload + sizeof(Register) * JSStack::CallFrameHeaderSize;
-
- jit.move(GPRInfo::returnValueGPR2, GPRInfo::regT0);
- jit.loadPtr(MacroAssembler::Address(GPRInfo::regT0, offsetOfFrameSize), GPRInfo::regT1); // Load the frame size.
- jit.move(GPRInfo::regT1, GPRInfo::regT2);
- jit.lshiftPtr(MacroAssembler::Imm32(3), GPRInfo::regT2);
- jit.move(GPRInfo::callFrameRegister, MacroAssembler::stackPointerRegister);
- jit.subPtr(GPRInfo::regT2, MacroAssembler::stackPointerRegister);
-
- MacroAssembler::Label loop = jit.label();
- jit.subPtr(MacroAssembler::TrustedImm32(1), GPRInfo::regT1);
- jit.move(GPRInfo::regT1, GPRInfo::regT4);
- jit.negPtr(GPRInfo::regT4);
- jit.load32(MacroAssembler::BaseIndex(GPRInfo::regT0, GPRInfo::regT1, MacroAssembler::TimesEight, offsetOfLocals), GPRInfo::regT2);
- jit.load32(MacroAssembler::BaseIndex(GPRInfo::regT0, GPRInfo::regT1, MacroAssembler::TimesEight, offsetOfLocals + sizeof(int32_t)), GPRInfo::regT3);
- jit.store32(GPRInfo::regT2, MacroAssembler::BaseIndex(GPRInfo::callFrameRegister, GPRInfo::regT4, MacroAssembler::TimesEight, -static_cast<intptr_t>(sizeof(Register))));
- jit.store32(GPRInfo::regT3, MacroAssembler::BaseIndex(GPRInfo::callFrameRegister, GPRInfo::regT4, MacroAssembler::TimesEight, -static_cast<intptr_t>(sizeof(Register)) + static_cast<intptr_t>(sizeof(int32_t))));
- jit.branchPtr(MacroAssembler::NotEqual, GPRInfo::regT1, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(-static_cast<intptr_t>(JSStack::CallFrameHeaderSize)))).linkTo(loop, &jit);
-
- jit.loadPtr(MacroAssembler::Address(GPRInfo::regT0, offsetOfTargetPC), GPRInfo::regT1);
- MacroAssembler::Jump ok = jit.branchPtr(MacroAssembler::Above, GPRInfo::regT1, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
- jit.abortWithReason(DFGUnreasonableOSREntryJumpDestination);
- ok.link(&jit);
- jit.jump(GPRInfo::regT1);
-
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("DFG OSR entry thunk"));
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.h b/Source/JavaScriptCore/dfg/DFGThunks.h
index 6ef0c50cc..60bfea634 100644
--- a/Source/JavaScriptCore/dfg/DFGThunks.h
+++ b/Source/JavaScriptCore/dfg/DFGThunks.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGThunks_h
#define DFGThunks_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "MacroAssemblerCodeRef.h"
@@ -37,7 +39,6 @@ class VM;
namespace DFG {
MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM*);
-MacroAssemblerCodeRef osrEntryThunkGenerator(VM*);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
index 5f509c41e..d51a1f0d8 100644
--- a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +32,7 @@
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "FTLCapabilities.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -47,109 +47,45 @@ public:
{
RELEASE_ASSERT(m_graph.m_plan.mode == DFGMode);
- if (!Options::useFTLJIT())
+ if (!Options::useExperimentalFTL())
return false;
-
- if (m_graph.m_profiledBlock->m_didFailFTLCompilation)
- return false;
-
+
#if ENABLE(FTL_JIT)
FTL::CapabilityLevel level = FTL::canCompile(m_graph);
if (level == FTL::CannotCompile)
return false;
- if (!Options::enableOSREntryToFTL())
- level = FTL::CanCompile;
-
- // First we find all the loops that contain a LoopHint for which we cannot OSR enter.
- // We use that information to decide if we need CheckTierUpAndOSREnter or CheckTierUpWithNestedTriggerAndOSREnter.
- NaturalLoops& naturalLoops = m_graph.m_naturalLoops;
- naturalLoops.computeIfNecessary(m_graph);
-
- HashSet<const NaturalLoop*> loopsContainingLoopHintWithoutOSREnter = findLoopsContainingLoopHintWithoutOSREnter(naturalLoops, level);
-
InsertionSet insertionSet(m_graph);
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- if (node->op() != LoopHint)
- continue;
-
- NodeOrigin origin = node->origin;
- if (canOSREnterAtLoopHint(level, block, nodeIndex)) {
- const NaturalLoop* loop = naturalLoops.innerMostLoopOf(block);
- if (loop && loopsContainingLoopHintWithoutOSREnter.contains(loop))
- insertionSet.insertNode(nodeIndex + 1, SpecNone, CheckTierUpWithNestedTriggerAndOSREnter, origin);
- else
- insertionSet.insertNode(nodeIndex + 1, SpecNone, CheckTierUpAndOSREnter, origin);
+ if (block->at(0)->op() == LoopHint) {
+ CodeOrigin codeOrigin = block->at(0)->codeOrigin;
+ NodeType nodeType;
+ if (level == FTL::CanCompileAndOSREnter && !codeOrigin.inlineCallFrame) {
+ nodeType = CheckTierUpAndOSREnter;
+ RELEASE_ASSERT(block->bytecodeBegin == codeOrigin.bytecodeIndex);
} else
- insertionSet.insertNode(nodeIndex + 1, SpecNone, CheckTierUpInLoop, origin);
- break;
+ nodeType = CheckTierUpInLoop;
+ insertionSet.insertNode(1, SpecNone, nodeType, codeOrigin);
}
- NodeAndIndex terminal = block->findTerminal();
- if (terminal.node->op() == Return) {
+ if (block->last()->op() == Return) {
insertionSet.insertNode(
- terminal.index, SpecNone, CheckTierUpAtReturn, terminal.node->origin);
+ block->size() - 1, SpecNone, CheckTierUpAtReturn, block->last()->codeOrigin);
}
insertionSet.execute(block);
}
- m_graph.m_plan.willTryToTierUp = true;
return true;
#else // ENABLE(FTL_JIT)
RELEASE_ASSERT_NOT_REACHED();
return false;
#endif // ENABLE(FTL_JIT)
}
-
-private:
-#if ENABLE(FTL_JIT)
- bool canOSREnterAtLoopHint(FTL::CapabilityLevel level, const BasicBlock* block, unsigned nodeIndex)
- {
- Node* node = block->at(nodeIndex);
- ASSERT(node->op() == LoopHint);
-
- NodeOrigin origin = node->origin;
- if (level != FTL::CanCompileAndOSREnter || origin.semantic.inlineCallFrame)
- return false;
-
- // We only put OSR checks for the first LoopHint in the block. Note that
- // more than one LoopHint could happen in cases where we did a lot of CFG
- // simplification in the bytecode parser, but it should be very rare.
- for (unsigned subNodeIndex = nodeIndex; subNodeIndex--;) {
- if (!block->at(subNodeIndex)->isSemanticallySkippable())
- return false;
- }
- return true;
- }
-
- HashSet<const NaturalLoop*> findLoopsContainingLoopHintWithoutOSREnter(const NaturalLoops& naturalLoops, FTL::CapabilityLevel level)
- {
- HashSet<const NaturalLoop*> loopsContainingLoopHintWithoutOSREnter;
- for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- if (node->op() != LoopHint)
- continue;
-
- if (!canOSREnterAtLoopHint(level, block, nodeIndex)) {
- const NaturalLoop* loop = naturalLoops.innerMostLoopOf(block);
- while (loop) {
- loopsContainingLoopHintWithoutOSREnter.add(loop);
- loop = naturalLoops.innerMostOuterLoop(*loop);
- }
- }
- }
- }
- return loopsContainingLoopHintWithoutOSREnter;
- }
-#endif
};
bool performTierUpCheckInjection(Graph& graph)
diff --git a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h
index 25e935589..f6e799ad8 100644
--- a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGTierUpCheckInjectionPhase_h
#define DFGTierUpCheckInjectionPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp
index dd8108a76..7770fd627 100644
--- a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp
@@ -31,7 +31,6 @@
#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "Executable.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
@@ -43,9 +42,10 @@ ToFTLDeferredCompilationCallback::ToFTLDeferredCompilationCallback(
ToFTLDeferredCompilationCallback::~ToFTLDeferredCompilationCallback() { }
-Ref<ToFTLDeferredCompilationCallback> ToFTLDeferredCompilationCallback::create(PassRefPtr<CodeBlock> dfgCodeBlock)
+PassRefPtr<ToFTLDeferredCompilationCallback> ToFTLDeferredCompilationCallback::create(
+ PassRefPtr<CodeBlock> dfgCodeBlock)
{
- return adoptRef(*new ToFTLDeferredCompilationCallback(dfgCodeBlock));
+ return adoptRef(new ToFTLDeferredCompilationCallback(dfgCodeBlock));
}
void ToFTLDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously(
@@ -84,8 +84,6 @@ void ToFTLDeferredCompilationCallback::compilationDidComplete(
m_dfgCodeBlock->jitCode()->dfg()->setOptimizationThresholdBasedOnCompilationResult(
m_dfgCodeBlock.get(), result);
-
- DeferredCompilationCallback::compilationDidComplete(codeBlock, result);
}
} } // JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h
index 3e0ea02d0..a4d840b20 100644
--- a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h
+++ b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h
@@ -26,6 +26,8 @@
#ifndef DFGToFTLDeferredCompilationCallback_h
#define DFGToFTLDeferredCompilationCallback_h
+#include <wtf/Platform.h>
+
#if ENABLE(FTL_JIT)
#include "DeferredCompilationCallback.h"
@@ -45,7 +47,8 @@ protected:
public:
virtual ~ToFTLDeferredCompilationCallback();
- static Ref<ToFTLDeferredCompilationCallback> create(PassRefPtr<CodeBlock> dfgCodeBlock);
+ static PassRefPtr<ToFTLDeferredCompilationCallback> create(
+ PassRefPtr<CodeBlock> dfgCodeBlock);
virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*);
virtual void compilationDidComplete(CodeBlock*, CompilationResult);
diff --git a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
index 70cc82502..17b45a328 100644
--- a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
@@ -31,7 +31,6 @@
#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "Executable.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
@@ -45,10 +44,11 @@ ToFTLForOSREntryDeferredCompilationCallback::~ToFTLForOSREntryDeferredCompilatio
{
}
-Ref<ToFTLForOSREntryDeferredCompilationCallback>ToFTLForOSREntryDeferredCompilationCallback::create(
+PassRefPtr<ToFTLForOSREntryDeferredCompilationCallback>
+ToFTLForOSREntryDeferredCompilationCallback::create(
PassRefPtr<CodeBlock> dfgCodeBlock)
{
- return adoptRef(*new ToFTLForOSREntryDeferredCompilationCallback(dfgCodeBlock));
+ return adoptRef(new ToFTLForOSREntryDeferredCompilationCallback(dfgCodeBlock));
}
void ToFTLForOSREntryDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously(
@@ -73,24 +73,13 @@ void ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete(
") result: ", result, "\n");
}
- JITCode* jitCode = m_dfgCodeBlock->jitCode()->dfg();
-
- switch (result) {
- case CompilationSuccessful:
- jitCode->osrEntryBlock = codeBlock;
- break;
- case CompilationFailed:
- jitCode->osrEntryRetry = 0;
- jitCode->abandonOSREntry = true;
- break;
- case CompilationDeferred:
- RELEASE_ASSERT_NOT_REACHED();
- case CompilationInvalidated:
- jitCode->osrEntryRetry = 0;
- break;
- }
+ if (result == CompilationSuccessful)
+ m_dfgCodeBlock->jitCode()->dfg()->osrEntryBlock = codeBlock;
- DeferredCompilationCallback::compilationDidComplete(codeBlock, result);
+ // FIXME: if we failed, we might want to just turn off OSR entry rather than
+ // totally turning off tier-up.
+ m_dfgCodeBlock->jitCode()->dfg()->setOptimizationThresholdBasedOnCompilationResult(
+ m_dfgCodeBlock.get(), result);
}
} } // JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h
index c9dcf6d7c..af6b97b16 100644
--- a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h
+++ b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h
@@ -26,6 +26,8 @@
#ifndef DFGToFTLForOSREntryDeferredCompilationCallback_h
#define DFGToFTLForOSREntryDeferredCompilationCallback_h
+#include <wtf/Platform.h>
+
#if ENABLE(FTL_JIT)
#include "DeferredCompilationCallback.h"
@@ -45,7 +47,8 @@ protected:
public:
virtual ~ToFTLForOSREntryDeferredCompilationCallback();
- static Ref<ToFTLForOSREntryDeferredCompilationCallback> create(PassRefPtr<CodeBlock> dfgCodeBlock);
+ static PassRefPtr<ToFTLForOSREntryDeferredCompilationCallback> create(
+ PassRefPtr<CodeBlock> dfgCodeBlock);
virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*);
virtual void compilationDidComplete(CodeBlock*, CompilationResult);
diff --git a/Source/JavaScriptCore/dfg/DFGTransition.h b/Source/JavaScriptCore/dfg/DFGTransition.h
deleted file mode 100644
index 49a654436..000000000
--- a/Source/JavaScriptCore/dfg/DFGTransition.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGTransition_h
-#define DFGTransition_h
-
-#if ENABLE(DFG_JIT)
-
-#include <wtf/PrintStream.h>
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class Structure;
-struct DumpContext;
-
-namespace DFG {
-
-struct Transition {
- Structure* previous;
- Structure* next;
-
- Transition()
- : previous(nullptr)
- , next(nullptr)
- {
- }
-
- Transition(Structure* previous, Structure* next)
- : previous(previous)
- , next(next)
- {
- }
-
- void dumpInContext(PrintStream&, DumpContext*) const;
- void dump(PrintStream&) const;
-};
-
-typedef Vector<Transition, 3> TransitionVector;
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGTransition_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
index 3416edfe3..5625ef4f0 100644
--- a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,7 +33,7 @@
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGVariableAccessDataDump.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include <wtf/HashMap.h>
namespace JSC { namespace DFG {
@@ -115,6 +115,7 @@ public:
// from the node, before doing any appending.
switch (node->op()) {
case SetArgument: {
+ ASSERT(!blockIndex);
// Insert a GetLocal and a CheckStructure immediately following this
// SetArgument, if the variable was a candidate for structure hoisting.
// If the basic block previously only had the SetArgument as its
@@ -126,23 +127,20 @@ public:
if (!iter->value.m_structure && !iter->value.m_arrayModeIsValid)
break;
- // Currently we should only be doing this hoisting for SetArguments at the prologue.
- ASSERT(!blockIndex);
-
- NodeOrigin origin = node->origin;
+ CodeOrigin codeOrigin = node->codeOrigin;
Node* getLocal = insertionSet.insertNode(
- indexInBlock + 1, variable->prediction(), GetLocal, origin,
+ indexInBlock + 1, variable->prediction(), GetLocal, codeOrigin,
OpInfo(variable), Edge(node));
if (iter->value.m_structure) {
insertionSet.insertNode(
- indexInBlock + 1, SpecNone, CheckStructure, origin,
+ indexInBlock + 1, SpecNone, CheckStructure, codeOrigin,
OpInfo(m_graph.addStructureSet(iter->value.m_structure)),
Edge(getLocal, CellUse));
} else if (iter->value.m_arrayModeIsValid) {
ASSERT(iter->value.m_arrayModeHoistingOkay);
insertionSet.insertNode(
- indexInBlock + 1, SpecNone, CheckArray, origin,
+ indexInBlock + 1, SpecNone, CheckArray, codeOrigin,
OpInfo(iter->value.m_arrayMode.asWord()),
Edge(getLocal, CellUse));
} else
@@ -165,18 +163,18 @@ public:
if (!iter->value.m_structure && !iter->value.m_arrayModeIsValid)
break;
- NodeOrigin origin = node->origin;
+ CodeOrigin codeOrigin = node->codeOrigin;
Edge child1 = node->child1();
if (iter->value.m_structure) {
insertionSet.insertNode(
- indexInBlock, SpecNone, CheckStructure, origin,
+ indexInBlock, SpecNone, CheckStructure, codeOrigin,
OpInfo(m_graph.addStructureSet(iter->value.m_structure)),
Edge(child1.node(), CellUse));
} else if (iter->value.m_arrayModeIsValid) {
ASSERT(iter->value.m_arrayModeHoistingOkay);
insertionSet.insertNode(
- indexInBlock, SpecNone, CheckArray, origin,
+ indexInBlock, SpecNone, CheckArray, codeOrigin,
OpInfo(iter->value.m_arrayMode.asWord()),
Edge(child1.node(), CellUse));
} else
@@ -217,7 +215,8 @@ private:
for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
Node* node = block->at(indexInBlock);
switch (node->op()) {
- case CheckStructure: {
+ case CheckStructure:
+ case StructureTransitionWatchpoint: {
Node* child = node->child1().node();
if (child->op() != GetLocal)
break;
@@ -228,9 +227,7 @@ private:
noticeStructureCheck(variable, node->structureSet());
break;
}
-
- case ArrayifyToStructure:
- case Arrayify:
+
case GetByOffset:
case PutByOffset:
case PutStructure:
@@ -247,11 +244,25 @@ private:
case GetTypedArrayByteOffset:
case Phantom:
case MovHint:
- case MultiGetByOffset:
- case MultiPutByOffset:
// Don't count these uses.
break;
+ case ArrayifyToStructure:
+ case Arrayify:
+ if (node->arrayMode().conversion() == Array::RageConvert) {
+ // Rage conversion changes structures. We should avoid tying to do
+ // any kind of hoisting when rage conversion is in play.
+ Node* child = node->child1().node();
+ if (child->op() != GetLocal)
+ break;
+ VariableAccessData* variable = child->variableAccessData();
+ variable->vote(VoteOther);
+ if (!shouldConsiderForHoisting<StructureTypeCheck>(variable))
+ break;
+ noticeStructureCheck(variable, 0);
+ }
+ break;
+
case SetLocal: {
// Find all uses of the source of the SetLocal. If any of them are a
// kind of CheckStructure, then we should notice them to ensure that
@@ -271,6 +282,13 @@ private:
noticeStructureCheck(variable, subNode->structureSet());
break;
}
+ case StructureTransitionWatchpoint: {
+ if (subNode->child1() != source)
+ break;
+
+ noticeStructureCheck(variable, subNode->structure());
+ break;
+ }
default:
break;
}
@@ -310,6 +328,7 @@ private:
}
case CheckStructure:
+ case StructureTransitionWatchpoint:
case GetByOffset:
case PutByOffset:
case PutStructure:
@@ -323,8 +342,6 @@ private:
case GetIndexedPropertyStorage:
case Phantom:
case MovHint:
- case MultiGetByOffset:
- case MultiPutByOffset:
// Don't count these uses.
break;
@@ -363,6 +380,13 @@ private:
noticeStructureCheckAccountingForArrayMode(variable, subNode->structureSet());
break;
}
+ case StructureTransitionWatchpoint: {
+ if (subNode->child1() != source)
+ break;
+
+ noticeStructureCheckAccountingForArrayMode(variable, subNode->structure());
+ break;
+ }
case CheckArray: {
if (subNode->child1() != source)
break;
@@ -473,7 +497,7 @@ private:
noticeStructureCheck(variable, 0);
return;
}
- noticeStructureCheck(variable, set.onlyStructure());
+ noticeStructureCheck(variable, set.singletonStructure());
}
void noticeCheckArray(VariableAccessData* variable, ArrayMode arrayMode)
diff --git a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h
index 756dfb1f1..97adaf2e7 100644
--- a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGTypeCheckHoistingPhase_h
#define DFGTypeCheckHoistingPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp
index 0722dd9cb..8f2929d5c 100644
--- a/Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
#include "DFGBasicBlockInlines.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -70,6 +70,7 @@ public:
for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
VariableAccessData* data = &m_graph.m_variableAccessData[i];
data->find()->predict(data->nonUnifiedPrediction());
+ data->find()->mergeIsCaptured(data->isCaptured());
data->find()->mergeStructureCheckHoistingFailed(data->structureCheckHoistingFailed());
data->find()->mergeCheckArrayHoistingFailed(data->checkArrayHoistingFailed());
data->find()->mergeShouldNeverUnbox(data->shouldNeverUnbox());
diff --git a/Source/JavaScriptCore/dfg/DFGUnificationPhase.h b/Source/JavaScriptCore/dfg/DFGUnificationPhase.h
index cb93b70f8..6713de78e 100644
--- a/Source/JavaScriptCore/dfg/DFGUnificationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGUnificationPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGUnificationPhase_h
#define DFGUnificationPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGUseKind.cpp b/Source/JavaScriptCore/dfg/DFGUseKind.cpp
index 3442341c8..073eb0e66 100644
--- a/Source/JavaScriptCore/dfg/DFGUseKind.cpp
+++ b/Source/JavaScriptCore/dfg/DFGUseKind.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace WTF {
using namespace JSC::DFG;
@@ -39,93 +37,68 @@ void printInternal(PrintStream& out, UseKind useKind)
switch (useKind) {
case UntypedUse:
out.print("Untyped");
- return;
+ break;
case Int32Use:
out.print("Int32");
- return;
+ break;
case KnownInt32Use:
out.print("KnownInt32");
- return;
- case Int52RepUse:
- out.print("Int52Rep");
- return;
+ break;
case MachineIntUse:
out.print("MachineInt");
- return;
- case NumberUse:
- out.print("Number");
- return;
+ break;
case RealNumberUse:
out.print("RealNumber");
- return;
- case DoubleRepUse:
- out.print("DoubleRep");
- return;
- case DoubleRepRealUse:
- out.print("DoubleRepReal");
- return;
- case DoubleRepMachineIntUse:
- out.print("DoubleRepMachineInt");
- return;
+ break;
+ case NumberUse:
+ out.print("Number");
+ break;
+ case KnownNumberUse:
+ out.print("KnownNumber");
+ break;
case BooleanUse:
out.print("Boolean");
- return;
- case KnownBooleanUse:
- out.print("KnownBoolean");
- return;
+ break;
case CellUse:
out.print("Cell");
- return;
+ break;
case KnownCellUse:
out.print("KnownCell");
- return;
+ break;
case ObjectUse:
out.print("Object");
- return;
- case FunctionUse:
- out.print("Function");
- return;
+ break;
case FinalObjectUse:
out.print("FinalObject");
- return;
+ break;
case ObjectOrOtherUse:
out.print("ObjectOrOther");
- return;
+ break;
case StringIdentUse:
out.print("StringIdent");
- return;
+ break;
case StringUse:
out.print("String");
- return;
+ break;
case KnownStringUse:
out.print("KnownString");
- return;
- case SymbolUse:
- out.print("Symbol");
- return;
+ break;
case StringObjectUse:
out.print("StringObject");
- return;
+ break;
case StringOrStringObjectUse:
out.print("StringOrStringObject");
- return;
- case NotStringVarUse:
- out.print("NotStringVar");
- return;
+ break;
case NotCellUse:
out.print("NotCell");
- return;
+ break;
case OtherUse:
out.print("Other");
- return;
- case MiscUse:
- out.print("Misc");
- return;
- case LastUseKind:
+ break;
+ default:
RELEASE_ASSERT_NOT_REACHED();
- return;
+ break;
}
- RELEASE_ASSERT_NOT_REACHED();
}
} // namespace WTF
diff --git a/Source/JavaScriptCore/dfg/DFGUseKind.h b/Source/JavaScriptCore/dfg/DFGUseKind.h
index 14c1e9567..7ad390524 100644
--- a/Source/JavaScriptCore/dfg/DFGUseKind.h
+++ b/Source/JavaScriptCore/dfg/DFGUseKind.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,60 +26,40 @@
#ifndef DFGUseKind_h
#define DFGUseKind_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include "DFGNodeFlags.h"
#include "SpeculatedType.h"
#include <wtf/PrintStream.h>
namespace JSC { namespace DFG {
enum UseKind {
- // The DFG has 3 representations of values used:
-
- // 1. The JSValue representation for a JSValue that must be stored in a GP
- // register (or a GP register pair), and follows rules for boxing and unboxing
- // that allow the JSValue to be stored as either fully boxed JSValues, or
- // unboxed Int32, Booleans, Cells, etc. in 32-bit as appropriate.
- UntypedUse, // UntypedUse must come first (value 0).
+ UntypedUse,
Int32Use,
KnownInt32Use,
MachineIntUse,
- NumberUse,
RealNumberUse,
+ NumberUse,
+ KnownNumberUse,
BooleanUse,
- KnownBooleanUse,
CellUse,
KnownCellUse,
ObjectUse,
- FunctionUse,
FinalObjectUse,
ObjectOrOtherUse,
StringIdentUse,
StringUse,
KnownStringUse,
- SymbolUse,
StringObjectUse,
StringOrStringObjectUse,
- NotStringVarUse,
NotCellUse,
OtherUse,
- MiscUse,
-
- // 2. The Double representation for an unboxed double value that must be stored
- // in an FP register.
- DoubleRepUse,
- DoubleRepRealUse,
- DoubleRepMachineIntUse,
-
- // 3. The Int52 representation for an unboxed integer value that must be stored
- // in a GP register.
- Int52RepUse,
-
LastUseKind // Must always be the last entry in the enum, as it is used to denote the number of enum elements.
};
-inline SpeculatedType typeFilterFor(UseKind useKind)
+ALWAYS_INLINE SpeculatedType typeFilterFor(UseKind useKind)
{
switch (useKind) {
case UntypedUse:
@@ -87,30 +67,20 @@ inline SpeculatedType typeFilterFor(UseKind useKind)
case Int32Use:
case KnownInt32Use:
return SpecInt32;
- case Int52RepUse:
- return SpecMachineInt;
case MachineIntUse:
- return SpecInt32 | SpecInt52AsDouble;
- case NumberUse:
- return SpecBytecodeNumber;
+ return SpecMachineInt;
case RealNumberUse:
- return SpecBytecodeRealNumber;
- case DoubleRepUse:
- return SpecFullDouble;
- case DoubleRepRealUse:
- return SpecDoubleReal;
- case DoubleRepMachineIntUse:
- return SpecInt52AsDouble;
+ return SpecFullRealNumber;
+ case NumberUse:
+ case KnownNumberUse:
+ return SpecFullNumber;
case BooleanUse:
- case KnownBooleanUse:
return SpecBoolean;
case CellUse:
case KnownCellUse:
return SpecCell;
case ObjectUse:
return SpecObject;
- case FunctionUse:
- return SpecFunction;
case FinalObjectUse:
return SpecFinalObject;
case ObjectOrOtherUse:
@@ -120,89 +90,76 @@ inline SpeculatedType typeFilterFor(UseKind useKind)
case StringUse:
case KnownStringUse:
return SpecString;
- case SymbolUse:
- return SpecSymbol;
case StringObjectUse:
return SpecStringObject;
case StringOrStringObjectUse:
return SpecString | SpecStringObject;
- case NotStringVarUse:
- return ~SpecStringVar;
case NotCellUse:
return ~SpecCell;
case OtherUse:
return SpecOther;
- case MiscUse:
- return SpecMisc;
default:
RELEASE_ASSERT_NOT_REACHED();
return SpecFullTop;
}
}
-inline bool shouldNotHaveTypeCheck(UseKind kind)
+ALWAYS_INLINE bool shouldNotHaveTypeCheck(UseKind kind)
{
switch (kind) {
case UntypedUse:
case KnownInt32Use:
+ case KnownNumberUse:
case KnownCellUse:
case KnownStringUse:
- case KnownBooleanUse:
- case Int52RepUse:
- case DoubleRepUse:
return true;
default:
return false;
}
}
-inline bool mayHaveTypeCheck(UseKind kind)
+ALWAYS_INLINE bool mayHaveTypeCheck(UseKind kind)
{
return !shouldNotHaveTypeCheck(kind);
}
-inline bool isNumerical(UseKind kind)
+ALWAYS_INLINE bool isNumerical(UseKind kind)
{
switch (kind) {
case Int32Use:
case KnownInt32Use:
- case NumberUse:
- case RealNumberUse:
- case Int52RepUse:
- case DoubleRepUse:
- case DoubleRepRealUse:
case MachineIntUse:
- case DoubleRepMachineIntUse:
+ case RealNumberUse:
+ case NumberUse:
+ case KnownNumberUse:
return true;
default:
return false;
}
}
-inline bool isDouble(UseKind kind)
+ALWAYS_INLINE bool isDouble(UseKind kind)
{
switch (kind) {
- case DoubleRepUse:
- case DoubleRepRealUse:
- case DoubleRepMachineIntUse:
+ case RealNumberUse:
+ case NumberUse:
+ case KnownNumberUse:
return true;
default:
return false;
}
}
-inline bool isCell(UseKind kind)
+ALWAYS_INLINE bool isCell(UseKind kind)
{
switch (kind) {
case CellUse:
case KnownCellUse:
case ObjectUse:
- case FunctionUse:
case FinalObjectUse:
case StringIdentUse:
case StringUse:
case KnownStringUse:
- case SymbolUse:
case StringObjectUse:
case StringOrStringObjectUse:
return true;
@@ -213,7 +170,7 @@ inline bool isCell(UseKind kind)
// Returns true if it uses structure in a way that could be clobbered by
// things that change the structure.
-inline bool usesStructure(UseKind kind)
+ALWAYS_INLINE bool usesStructure(UseKind kind)
{
switch (kind) {
case StringObjectUse:
@@ -224,30 +181,6 @@ inline bool usesStructure(UseKind kind)
}
}
-// Returns true if we've already guaranteed the type
-inline bool alreadyChecked(UseKind kind, SpeculatedType type)
-{
- // If the check involves the structure then we need to know more than just the type to be sure
- // that the check is done.
- if (usesStructure(kind))
- return false;
-
- return !(type & ~typeFilterFor(kind));
-}
-
-inline UseKind useKindForResult(NodeFlags result)
-{
- ASSERT(!(result & ~NodeResultMask));
- switch (result) {
- case NodeResultInt52:
- return Int52RepUse;
- case NodeResultDouble:
- return DoubleRepUse;
- default:
- return UntypedUse;
- }
-}
-
} } // namespace JSC::DFG
namespace WTF {
diff --git a/Source/JavaScriptCore/dfg/DFGValidate.cpp b/Source/JavaScriptCore/dfg/DFGValidate.cpp
index 6a134d048..09bea406f 100644
--- a/Source/JavaScriptCore/dfg/DFGValidate.cpp
+++ b/Source/JavaScriptCore/dfg/DFGValidate.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,8 +29,6 @@
#if ENABLE(DFG_JIT)
#include "CodeBlockWithJITType.h"
-#include "DFGMayExit.h"
-#include "JSCInlines.h"
#include <wtf/Assertions.h>
#include <wtf/BitVector.h>
@@ -38,19 +36,17 @@ namespace JSC { namespace DFG {
class Validate {
public:
- Validate(Graph& graph, GraphDumpMode graphDumpMode, CString graphDumpBeforePhase)
+ Validate(Graph& graph, GraphDumpMode graphDumpMode)
: m_graph(graph)
, m_graphDumpMode(graphDumpMode)
- , m_graphDumpBeforePhase(graphDumpBeforePhase)
{
}
#define VALIDATE(context, assertion) do { \
if (!(assertion)) { \
- startCrashing(); \
dataLogF("\n\n\nAt "); \
reportValidationContext context; \
- dataLogF(": validation failed: %s (%s:%d).\n", #assertion, __FILE__, __LINE__); \
+ dataLogF(": validation %s (%s:%d) failed.\n", #assertion, __FILE__, __LINE__); \
dumpGraphIfAppropriate(); \
WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
CRASH(); \
@@ -59,14 +55,13 @@ public:
#define V_EQUAL(context, left, right) do { \
if (left != right) { \
- startCrashing(); \
dataLogF("\n\n\nAt "); \
reportValidationContext context; \
- dataLogF(": validation failed: (%s = ", #left); \
+ dataLogF(": validation (%s = ", #left); \
dataLog(left); \
dataLogF(") == (%s = ", #right); \
dataLog(right); \
- dataLogF(") (%s:%d).\n", __FILE__, __LINE__); \
+ dataLogF(") (%s:%d) failed.\n", __FILE__, __LINE__); \
dumpGraphIfAppropriate(); \
WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #left " == " #right); \
CRASH(); \
@@ -74,7 +69,7 @@ public:
} while (0)
#define notSet (static_cast<size_t>(-1))
-
+
void validate()
{
// NB. This code is not written for performance, since it is not intended to run
@@ -118,9 +113,6 @@ public:
continue;
m_myRefCounts.find(edge.node())->value++;
-
- validateEdgeWithDoubleResultIfNecessary(node, edge);
- VALIDATE((node, edge), edge->hasInt52Result() == (edge.useKind() == Int52RepUse));
if (m_graph.m_form == SSA) {
// In SSA, all edges must hasResult().
@@ -146,6 +138,31 @@ public:
break;
VALIDATE((node, edge), edge->variableAccessData() == node->variableAccessData());
break;
+ case Phantom:
+ switch (m_graph.m_form) {
+ case LoadStore:
+ if (j) {
+ VALIDATE((node, edge), edge->hasResult());
+ break;
+ }
+ switch (edge->op()) {
+ case Phi:
+ case SetArgument:
+ case SetLocal:
+ break;
+ default:
+ VALIDATE((node, edge), edge->hasResult());
+ break;
+ }
+ break;
+ case ThreadedCPS:
+ VALIDATE((node, edge), edge->hasResult());
+ break;
+ case SSA:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ break;
default:
VALIDATE((node, edge), edge->hasResult());
break;
@@ -162,103 +179,8 @@ public:
Node* node = block->node(i);
if (m_graph.m_refCountState == ExactRefCount)
V_EQUAL((node), m_myRefCounts.get(node), node->adjustedRefCount());
- }
-
- bool foundTerminal = false;
- for (size_t i = 0 ; i < block->size(); ++i) {
- Node* node = block->at(i);
- if (node->isTerminal()) {
- foundTerminal = true;
- for (size_t j = i + 1; j < block->size(); ++j) {
- node = block->at(j);
- VALIDATE((node), node->op() == Phantom || node->op() == PhantomLocal || node->op() == Flush || node->op() == Check);
- m_graph.doToChildren(
- node,
- [&] (Edge edge) {
- VALIDATE((node, edge), shouldNotHaveTypeCheck(edge.useKind()));
- });
- }
- break;
- }
- }
- VALIDATE((block), foundTerminal);
-
- for (size_t i = 0; i < block->size(); ++i) {
- Node* node = block->at(i);
-
- VALIDATE((node), node->origin.semantic.isSet() == node->origin.forExit.isSet());
- VALIDATE((node), !mayExit(m_graph, node) || node->origin.forExit.isSet());
- VALIDATE((node), !node->hasStructure() || !!node->structure());
- VALIDATE((node), !node->hasCellOperand() || node->cellOperand()->value().isCell());
- VALIDATE((node), !node->hasCellOperand() || !!node->cellOperand()->value());
-
- if (!(node->flags() & NodeHasVarArgs)) {
- if (!node->child2())
- VALIDATE((node), !node->child3());
- if (!node->child1())
- VALIDATE((node), !node->child2());
- }
-
- switch (node->op()) {
- case Identity:
- VALIDATE((node), canonicalResultRepresentation(node->result()) == canonicalResultRepresentation(node->child1()->result()));
- break;
- case SetLocal:
- case PutStack:
- case Upsilon:
- VALIDATE((node), !!node->child1());
- switch (node->child1().useKind()) {
- case UntypedUse:
- case CellUse:
- case Int32Use:
- case Int52RepUse:
- case DoubleRepUse:
- case BooleanUse:
- break;
- default:
- VALIDATE((node), !"Bad use kind");
- break;
- }
- break;
- case MakeRope:
- case ValueAdd:
- case ArithAdd:
- case ArithSub:
- case ArithMul:
- case ArithIMul:
- case ArithDiv:
- case ArithMod:
- case ArithMin:
- case ArithMax:
- case ArithPow:
- case CompareLess:
- case CompareLessEq:
- case CompareGreater:
- case CompareGreaterEq:
- case CompareEq:
- case CompareEqConstant:
- case CompareStrictEq:
- VALIDATE((node), !!node->child1());
- VALIDATE((node), !!node->child2());
- break;
- case PutStructure:
- VALIDATE((node), !node->transition()->previous->dfgShouldWatch());
- break;
- case MultiPutByOffset:
- for (unsigned i = node->multiPutByOffsetData().variants.size(); i--;) {
- const PutByIdVariant& variant = node->multiPutByOffsetData().variants[i];
- if (variant.kind() != PutByIdVariant::Transition)
- continue;
- VALIDATE((node), !variant.oldStructureForTransition()->dfgShouldWatch());
- }
- break;
- case DoubleConstant:
- case Int52Constant:
- VALIDATE((node), node->isNumberConstant());
- break;
- default:
- break;
- }
+ else
+ V_EQUAL((node), node->refCount(), 1);
}
}
@@ -277,7 +199,6 @@ public:
private:
Graph& m_graph;
GraphDumpMode m_graphDumpMode;
- CString m_graphDumpBeforePhase;
HashMap<Node*, unsigned> m_myRefCounts;
HashSet<Node*> m_acceptableNodes;
@@ -407,7 +328,6 @@ private:
Node* node = block->at(i);
ASSERT(nodesInThisBlock.contains(node));
VALIDATE((node), node->op() != Phi);
- VALIDATE((node), node->origin.forExit.isSet());
for (unsigned j = 0; j < m_graph.numChildren(node); ++j) {
Edge edge = m_graph.child(node, j);
if (!edge)
@@ -418,63 +338,39 @@ private:
case GetLocal:
case Flush:
break;
+ case Phantom:
+ if (m_graph.m_form == LoadStore && !j)
+ break;
+ FALLTHROUGH;
default:
VALIDATE((node, edge), !phisInThisBlock.contains(edge.node()));
break;
}
}
- switch (node->op()) {
- case Phi:
- case Upsilon:
- case CheckInBounds:
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case GetMyArgumentByVal:
- case PutHint:
- case CheckStructureImmediate:
- case MaterializeNewObject:
- case MaterializeCreateActivation:
- case PutStack:
- case KillStack:
- case GetStack:
- VALIDATE((node), !"unexpected node type in CPS");
- break;
- case Phantom:
- VALIDATE((node), m_graph.m_fixpointState != FixpointNotConverged);
- break;
- default:
- break;
- }
-
if (!node->shouldGenerate())
continue;
switch (node->op()) {
case GetLocal:
+ if (node->variableAccessData()->isCaptured())
+ break;
// Ignore GetLocal's that we know to be dead, but that the graph
// doesn't yet know to be dead.
if (!m_myRefCounts.get(node))
break;
- if (m_graph.m_form == ThreadedCPS) {
+ if (m_graph.m_form == ThreadedCPS)
VALIDATE((node, block), getLocalPositions.operand(node->local()) == notSet);
- VALIDATE((node, block), !!node->child1());
- }
getLocalPositions.operand(node->local()) = i;
break;
case SetLocal:
+ if (node->variableAccessData()->isCaptured())
+ break;
// Only record the first SetLocal. There may be multiple SetLocals
// because of flushing.
if (setLocalPositions.operand(node->local()) != notSet)
break;
setLocalPositions.operand(node->local()) = i;
break;
- case SetArgument:
- // This acts like a reset. It's ok to have a second GetLocal for a local in the same
- // block if we had a SetArgument for that local.
- getLocalPositions.operand(node->local()) = notSet;
- setLocalPositions.operand(node->local()) = notSet;
- break;
default:
break;
}
@@ -504,81 +400,30 @@ private:
if (!block)
continue;
- VALIDATE((block), block->phis.isEmpty());
-
unsigned nodeIndex = 0;
- for (; nodeIndex < block->size() && !block->at(nodeIndex)->origin.forExit.isSet(); nodeIndex++) { }
+ for (; nodeIndex < block->size() && !block->at(nodeIndex)->codeOrigin.isSet(); nodeIndex++) { }
VALIDATE((block), nodeIndex < block->size());
for (; nodeIndex < block->size(); nodeIndex++)
- VALIDATE((block->at(nodeIndex)), block->at(nodeIndex)->origin.forExit.isSet());
+ VALIDATE((block->at(nodeIndex)), block->at(nodeIndex)->codeOrigin.isSet());
for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
Node* node = block->at(nodeIndex);
switch (node->op()) {
case Phi:
- VALIDATE((node), !node->origin.forExit.isSet());
+ VALIDATE((node), !node->codeOrigin.isSet());
break;
- case GetLocal:
- case SetLocal:
- case GetLocalUnlinked:
- case SetArgument:
- case Phantom:
- VALIDATE((node), !"bad node type for SSA");
- break;
-
default:
// FIXME: Add more things here.
// https://bugs.webkit.org/show_bug.cgi?id=123471
break;
}
- switch (node->op()) {
- case PhantomNewObject:
- case PhantomNewFunction:
- case PhantomCreateActivation:
- case PhantomDirectArguments:
- case PhantomClonedArguments:
- case MovHint:
- case Upsilon:
- case ForwardVarargs:
- case CallForwardVarargs:
- case ConstructForwardVarargs:
- case GetMyArgumentByVal:
- break;
-
- case Check:
- // FIXME: This is probably not correct.
- break;
-
- case PutHint:
- VALIDATE((node), node->child1()->isPhantomAllocation());
- break;
-
- default:
- m_graph.doToChildren(
- node,
- [&] (const Edge& edge) {
- VALIDATE((node), !edge->isPhantomAllocation());
- });
- break;
- }
}
}
}
-
- void validateEdgeWithDoubleResultIfNecessary(Node* node, Edge edge)
- {
- if (!edge->hasDoubleResult())
- return;
-
- if (m_graph.m_planStage < PlanStage::AfterFixup)
- return;
-
- VALIDATE((node, edge), edge.useKind() == DoubleRepUse || edge.useKind() == DoubleRepRealUse || edge.useKind() == DoubleRepMachineIntUse);
- }
-
+
void checkOperand(
BasicBlock* block, Operands<size_t>& getLocalPositions,
Operands<size_t>& setLocalPositions, VirtualRegister operand)
@@ -613,23 +458,23 @@ private:
void reportValidationContext(VirtualRegister local, BasicBlock* block)
{
if (!block) {
- dataLog(local, " in null Block ");
+ dataLog("r", local, " in null Block ");
return;
}
- dataLog(local, " in Block ", *block);
+ dataLog("r", local, " in Block ", *block);
}
void reportValidationContext(
VirtualRegister local, BasicBlock* sourceBlock, BasicBlock* destinationBlock)
{
- dataLog(local, " in Block ", *sourceBlock, " -> ", *destinationBlock);
+ dataLog("r", local, " in Block ", *sourceBlock, " -> ", *destinationBlock);
}
void reportValidationContext(
VirtualRegister local, BasicBlock* sourceBlock, Node* prevNode)
{
- dataLog(prevNode, " for ", local, " in Block ", *sourceBlock);
+ dataLog(prevNode, " for r", local, " in Block ", *sourceBlock);
}
void reportValidationContext(Node* node, BasicBlock* block)
@@ -652,19 +497,14 @@ private:
{
if (m_graphDumpMode == DontDumpGraph)
return;
- dataLog("\n");
- if (!m_graphDumpBeforePhase.isNull()) {
- dataLog("Before phase:\n");
- dataLog(m_graphDumpBeforePhase);
- }
dataLog("At time of failure:\n");
m_graph.dump();
}
};
-void validate(Graph& graph, GraphDumpMode graphDumpMode, CString graphDumpBeforePhase)
+void validate(Graph& graph, GraphDumpMode graphDumpMode)
{
- Validate validationObject(graph, graphDumpMode, graphDumpBeforePhase);
+ Validate validationObject(graph, graphDumpMode);
validationObject.validate();
}
diff --git a/Source/JavaScriptCore/dfg/DFGValidate.h b/Source/JavaScriptCore/dfg/DFGValidate.h
index ff4d06bdd..92aa293e3 100644
--- a/Source/JavaScriptCore/dfg/DFGValidate.h
+++ b/Source/JavaScriptCore/dfg/DFGValidate.h
@@ -26,6 +26,8 @@
#ifndef DFGValidate_h
#define DFGValidate_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -35,7 +37,7 @@ namespace JSC { namespace DFG {
enum GraphDumpMode { DontDumpGraph, DumpGraph };
-void validate(Graph&, GraphDumpMode = DumpGraph, CString graphDumpBeforePhase = CString());
+void validate(Graph&, GraphDumpMode = DumpGraph);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGThreadData.h b/Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h
index d86cf9078..89de9fd08 100644
--- a/Source/JavaScriptCore/dfg/DFGThreadData.h
+++ b/Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,38 +23,35 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGThreadData_h
-#define DFGThreadData_h
+#ifndef DFGValueRecoveryOverride_h
+#define DFGValueRecoveryOverride_h
+
+#include <wtf/Platform.h>
#if ENABLE(DFG_JIT)
-#include <wtf/Lock.h>
-#include <wtf/Threading.h>
+#include "ValueRecovery.h"
+#include <wtf/RefCounted.h>
namespace JSC { namespace DFG {
-class Safepoint;
-class Worklist;
-
-class ThreadData {
- WTF_MAKE_FAST_ALLOCATED;
+class ValueRecoveryOverride : public RefCounted<ValueRecoveryOverride> {
public:
- ThreadData(Worklist*);
- ~ThreadData();
+ ValueRecoveryOverride() { }
-private:
- friend class Safepoint;
- friend class Worklist;
+ ValueRecoveryOverride(VirtualRegister operand, const ValueRecovery& recovery)
+ : operand(operand)
+ , recovery(recovery)
+ {
+ }
- Worklist* m_worklist;
- ThreadIdentifier m_identifier;
- Lock m_rightToRun;
- Safepoint* m_safepoint;
+ VirtualRegister operand;
+ ValueRecovery recovery;
};
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
-#endif // DFGThreadData_h
+#endif // DFGValueRecoveryOverride_h
diff --git a/Source/JavaScriptCore/dfg/DFGValueSource.cpp b/Source/JavaScriptCore/dfg/DFGValueSource.cpp
index 41d8b475a..51cf78847 100644
--- a/Source/JavaScriptCore/dfg/DFGValueSource.cpp
+++ b/Source/JavaScriptCore/dfg/DFGValueSource.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSCInlines.h"
-
namespace JSC { namespace DFG {
void ValueSource::dump(PrintStream& out) const
@@ -42,22 +40,25 @@ void ValueSource::dump(PrintStream& out) const
out.print("IsDead");
break;
case ValueInJSStack:
- out.print("JS:", virtualRegister());
+ out.print("JS:r", virtualRegister());
break;
case Int32InJSStack:
- out.print("Int32:", virtualRegister());
+ out.print("Int32:r", virtualRegister());
break;
case Int52InJSStack:
- out.print("Int52:", virtualRegister());
+ out.print("Int52:r", virtualRegister());
break;
case CellInJSStack:
- out.print("Cell:", virtualRegister());
+ out.print("Cell:r", virtualRegister());
break;
case BooleanInJSStack:
- out.print("Bool:", virtualRegister());
+ out.print("Bool:r", virtualRegister());
break;
case DoubleInJSStack:
- out.print("Double:", virtualRegister());
+ out.print("Double:r", virtualRegister());
+ break;
+ case ArgumentsSource:
+ out.print("Arguments");
break;
case HaveNode:
out.print("Node(", m_value, ")");
@@ -68,11 +69,6 @@ void ValueSource::dump(PrintStream& out) const
}
}
-void ValueSource::dumpInContext(PrintStream& out, DumpContext*) const
-{
- dump(out);
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGValueSource.h b/Source/JavaScriptCore/dfg/DFGValueSource.h
index 1b55797d5..1e56f654f 100644
--- a/Source/JavaScriptCore/dfg/DFGValueSource.h
+++ b/Source/JavaScriptCore/dfg/DFGValueSource.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGValueSource_h
#define DFGValueSource_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -45,6 +47,7 @@ enum ValueSourceKind {
CellInJSStack,
BooleanInJSStack,
DoubleInJSStack,
+ ArgumentsSource,
SourceIsDead,
HaveNode
};
@@ -64,6 +67,8 @@ static inline ValueSourceKind dataFormatToValueSourceKind(DataFormat dataFormat)
return CellInJSStack;
case DataFormatDead:
return SourceIsDead;
+ case DataFormatArguments:
+ return ArgumentsSource;
default:
RELEASE_ASSERT(dataFormat & DataFormatJS);
return ValueInJSStack;
@@ -85,6 +90,8 @@ static inline DataFormat valueSourceKindToDataFormat(ValueSourceKind kind)
return DataFormatBoolean;
case DoubleInJSStack:
return DataFormatDouble;
+ case ArgumentsSource:
+ return DataFormatArguments;
case SourceIsDead:
return DataFormatDead;
default:
@@ -115,7 +122,7 @@ public:
explicit ValueSource(ValueSourceKind valueSourceKind)
: m_kind(valueSourceKind)
{
- ASSERT(kind() == SourceIsDead);
+ ASSERT(kind() == ArgumentsSource || kind() == SourceIsDead || kind() == ArgumentsSource);
}
explicit ValueSource(MinifiedID id)
@@ -152,6 +159,8 @@ public:
return ValueSource(CellInJSStack, where);
case FlushedBoolean:
return ValueSource(BooleanInJSStack, where);
+ case FlushedArguments:
+ return ValueSource(ArgumentsSource);
}
RELEASE_ASSERT_NOT_REACHED();
return ValueSource();
@@ -167,8 +176,6 @@ public:
return kind() != SourceNotSet;
}
- bool operator!() const { return !isSet(); }
-
ValueSourceKind kind() const
{
return m_kind;
@@ -189,6 +196,9 @@ public:
case SourceIsDead:
return ValueRecovery::constant(jsUndefined());
+ case ArgumentsSource:
+ return ValueRecovery::argumentsThatWereNotCreated();
+
default:
return ValueRecovery::displacedInJSStack(virtualRegister(), dataFormat());
}
@@ -207,7 +217,6 @@ public:
}
void dump(PrintStream&) const;
- void dumpInContext(PrintStream&, DumpContext*) const;
private:
ValueSourceKind m_kind;
diff --git a/Source/JavaScriptCore/dfg/DFGValueStrength.cpp b/Source/JavaScriptCore/dfg/DFGValueStrength.cpp
deleted file mode 100644
index 2c6e612b4..000000000
--- a/Source/JavaScriptCore/dfg/DFGValueStrength.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGValueStrength.h"
-
-#if ENABLE(DFG_JIT)
-
-namespace WTF {
-
-using namespace JSC::DFG;
-
-void printInternal(PrintStream& out, ValueStrength strength)
-{
- switch (strength) {
- case WeakValue:
- out.print("Weak");
- return;
- case StrongValue:
- out.print("Strong");
- return;
- }
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGValueStrength.h b/Source/JavaScriptCore/dfg/DFGValueStrength.h
deleted file mode 100644
index 72cd71c29..000000000
--- a/Source/JavaScriptCore/dfg/DFGValueStrength.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGValueStrength_h
-#define DFGValueStrength_h
-
-#if ENABLE(DFG_JIT)
-
-#include <wtf/PrintStream.h>
-
-namespace JSC { namespace DFG {
-
-enum ValueStrength {
- // The value has been used for optimization and it arose through inference. We don't want the
- // fact that we optimized the code to result in the GC keeping this value alive unnecessarily,
- // so we'd rather kill the code and recompile than keep the object alive longer.
- WeakValue,
-
- // The code will keep this value alive. This is true of constants that were present in the
- // source. String constants tend to be strong.
- StrongValue
-};
-
-inline ValueStrength merge(ValueStrength a, ValueStrength b)
-{
- switch (a) {
- case WeakValue:
- return b;
- case StrongValue:
- return StrongValue;
- }
- RELEASE_ASSERT_NOT_REACHED();
-
- return WeakValue;
-}
-
-} } // namespace JSC::DFG
-
-namespace WTF {
-
-void printInternal(PrintStream&, JSC::DFG::ValueStrength);
-
-} // namespace WTF
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGValueStrength_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp b/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp
deleted file mode 100644
index 9371de540..000000000
--- a/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGVarargsForwardingPhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGArgumentsUtilities.h"
-#include "DFGClobberize.h"
-#include "DFGForAllKills.h"
-#include "DFGGraph.h"
-#include "DFGPhase.h"
-#include "JSCInlines.h"
-#include <wtf/ListDump.h>
-
-namespace JSC { namespace DFG {
-
-namespace {
-
-bool verbose = false;
-
-class VarargsForwardingPhase : public Phase {
-public:
- VarargsForwardingPhase(Graph& graph)
- : Phase(graph, "varargs forwarding")
- {
- }
-
- bool run()
- {
- DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA);
-
- if (verbose) {
- dataLog("Graph before varargs forwarding:\n");
- m_graph.dump();
- }
-
- m_changed = false;
- for (BasicBlock* block : m_graph.blocksInNaturalOrder())
- handleBlock(block);
- return m_changed;
- }
-
-private:
- void handleBlock(BasicBlock* block)
- {
- for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- switch (node->op()) {
- case CreateDirectArguments:
- case CreateClonedArguments:
- handleCandidate(block, nodeIndex);
- break;
- default:
- break;
- }
- }
- }
-
- void handleCandidate(BasicBlock* block, unsigned candidateNodeIndex)
- {
- // We expect calls into this function to be rare. So, this is written in a simple O(n) manner.
-
- Node* candidate = block->at(candidateNodeIndex);
- if (verbose)
- dataLog("Handling candidate ", candidate, "\n");
-
- // Find the index of the last node in this block to use the candidate, and look for escaping
- // sites.
- unsigned lastUserIndex = candidateNodeIndex;
- Vector<VirtualRegister, 2> relevantLocals; // This is a set. We expect it to be a small set.
- for (unsigned nodeIndex = candidateNodeIndex + 1; nodeIndex < block->size(); ++nodeIndex) {
- Node* node = block->at(nodeIndex);
-
- switch (node->op()) {
- case MovHint:
- if (node->child1() != candidate)
- break;
- lastUserIndex = nodeIndex;
- if (!relevantLocals.contains(node->unlinkedLocal()))
- relevantLocals.append(node->unlinkedLocal());
- break;
-
- case Check: {
- bool sawEscape = false;
- m_graph.doToChildren(
- node,
- [&] (Edge edge) {
- if (edge == candidate)
- lastUserIndex = nodeIndex;
-
- if (edge.willNotHaveCheck())
- return;
-
- if (alreadyChecked(edge.useKind(), SpecObject))
- return;
-
- sawEscape = true;
- });
- if (sawEscape) {
- if (verbose)
- dataLog(" Escape at ", node, "\n");
- return;
- }
- break;
- }
-
- case LoadVarargs:
- if (m_graph.uses(node, candidate))
- lastUserIndex = nodeIndex;
- break;
-
- case CallVarargs:
- case ConstructVarargs:
- if (node->child1() == candidate || node->child3() == candidate) {
- if (verbose)
- dataLog(" Escape at ", node, "\n");
- return;
- }
- if (node->child2() == candidate)
- lastUserIndex = nodeIndex;
- break;
-
- case SetLocal:
- if (node->child1() == candidate && node->variableAccessData()->isLoadedFrom()) {
- if (verbose)
- dataLog(" Escape at ", node, "\n");
- return;
- }
- break;
-
- default:
- if (m_graph.uses(node, candidate)) {
- if (verbose)
- dataLog(" Escape at ", node, "\n");
- return;
- }
- }
-
- forAllKilledOperands(
- m_graph, node, block->tryAt(nodeIndex + 1),
- [&] (VirtualRegister reg) {
- if (verbose)
- dataLog(" Killing ", reg, " while we are interested in ", listDump(relevantLocals), "\n");
- for (unsigned i = 0; i < relevantLocals.size(); ++i) {
- if (relevantLocals[i] == reg) {
- relevantLocals[i--] = relevantLocals.last();
- relevantLocals.removeLast();
- lastUserIndex = nodeIndex;
- }
- }
- });
- }
- if (verbose)
- dataLog("Selected lastUserIndex = ", lastUserIndex, ", ", block->at(lastUserIndex), "\n");
-
- // We're still in business. Determine if between the candidate and the last user there is any
- // effect that could interfere with sinking.
- for (unsigned nodeIndex = candidateNodeIndex + 1; nodeIndex <= lastUserIndex; ++nodeIndex) {
- Node* node = block->at(nodeIndex);
-
- // We have our own custom switch to detect some interferences that clobberize() wouldn't know
- // about, and also some of the common ones, too. In particular, clobberize() doesn't know
- // that Flush, MovHint, ZombieHint, and KillStack are bad because it's not worried about
- // what gets read on OSR exit.
- switch (node->op()) {
- case MovHint:
- case ZombieHint:
- case KillStack:
- if (argumentsInvolveStackSlot(candidate, node->unlinkedLocal())) {
- if (verbose)
- dataLog(" Interference at ", node, "\n");
- return;
- }
- break;
-
- case PutStack:
- if (argumentsInvolveStackSlot(candidate, node->stackAccessData()->local)) {
- if (verbose)
- dataLog(" Interference at ", node, "\n");
- return;
- }
- break;
-
- case SetLocal:
- case Flush:
- if (argumentsInvolveStackSlot(candidate, node->local())) {
- if (verbose)
- dataLog(" Interference at ", node, "\n");
- return;
- }
- break;
-
- default: {
- bool doesInterfere = false;
- clobberize(
- m_graph, node, NoOpClobberize(),
- [&] (AbstractHeap heap) {
- if (heap.kind() != Stack) {
- ASSERT(!heap.overlaps(Stack));
- return;
- }
- ASSERT(!heap.payload().isTop());
- VirtualRegister reg(heap.payload().value32());
- if (argumentsInvolveStackSlot(candidate, reg))
- doesInterfere = true;
- },
- NoOpClobberize());
- if (doesInterfere) {
- if (verbose)
- dataLog(" Interference at ", node, "\n");
- return;
- }
- } }
- }
-
- // We can make this work.
- if (verbose)
- dataLog(" Will do forwarding!\n");
- m_changed = true;
-
- // Transform the program.
- switch (candidate->op()) {
- case CreateDirectArguments:
- candidate->setOpAndDefaultFlags(PhantomDirectArguments);
- break;
-
- case CreateClonedArguments:
- candidate->setOpAndDefaultFlags(PhantomClonedArguments);
- break;
-
- default:
- DFG_CRASH(m_graph, candidate, "bad node type");
- break;
- }
- for (unsigned nodeIndex = candidateNodeIndex + 1; nodeIndex <= lastUserIndex; ++nodeIndex) {
- Node* node = block->at(nodeIndex);
- switch (node->op()) {
- case Check:
- case MovHint:
- case PutHint:
- // We don't need to change anything with these.
- break;
-
- case LoadVarargs:
- if (node->child1() != candidate)
- break;
- node->setOpAndDefaultFlags(ForwardVarargs);
- break;
-
- case CallVarargs:
- if (node->child2() != candidate)
- break;
- node->setOpAndDefaultFlags(CallForwardVarargs);
- break;
-
- case ConstructVarargs:
- if (node->child2() != candidate)
- break;
- node->setOpAndDefaultFlags(ConstructForwardVarargs);
- break;
-
- case SetLocal:
- // This is super odd. We don't have to do anything here, since in DFG IR, the phantom
- // arguments nodes do produce a JSValue. Also, we know that if this SetLocal referenecs a
- // candidate then the SetLocal - along with all of its references - will die off pretty
- // soon, since it has no real users. DCE will surely kill it. If we make it to SSA, then
- // SSA conversion will kill it.
- break;
-
- default:
- if (ASSERT_DISABLED)
- break;
- m_graph.doToChildren(
- node,
- [&] (Edge edge) {
- DFG_ASSERT(m_graph, node, edge != candidate);
- });
- break;
- }
- }
- }
-
- bool m_changed;
-};
-
-} // anonymous namespace
-
-bool performVarargsForwarding(Graph& graph)
-{
- SamplingRegion samplingRegion("DFG Varargs Forwarding Phase");
- return runPhase<VarargsForwardingPhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp b/Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp
deleted file mode 100644
index bd1ba87ee..000000000
--- a/Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGVariableAccessData.h"
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-VariableAccessData::VariableAccessData()
- : m_local(static_cast<VirtualRegister>(std::numeric_limits<int>::min()))
- , m_prediction(SpecNone)
- , m_argumentAwarePrediction(SpecNone)
- , m_flags(0)
- , m_shouldNeverUnbox(false)
- , m_structureCheckHoistingFailed(false)
- , m_checkArrayHoistingFailed(false)
- , m_isProfitableToUnbox(false)
- , m_isLoadedFrom(false)
- , m_doubleFormatState(EmptyDoubleFormatState)
-{
- clearVotes();
-}
-
-VariableAccessData::VariableAccessData(VirtualRegister local)
- : m_local(local)
- , m_prediction(SpecNone)
- , m_argumentAwarePrediction(SpecNone)
- , m_flags(0)
- , m_shouldNeverUnbox(false)
- , m_structureCheckHoistingFailed(false)
- , m_checkArrayHoistingFailed(false)
- , m_isProfitableToUnbox(false)
- , m_isLoadedFrom(false)
- , m_doubleFormatState(EmptyDoubleFormatState)
-{
- clearVotes();
-}
-
-bool VariableAccessData::mergeShouldNeverUnbox(bool shouldNeverUnbox)
-{
- bool newShouldNeverUnbox = m_shouldNeverUnbox | shouldNeverUnbox;
- if (newShouldNeverUnbox == m_shouldNeverUnbox)
- return false;
- m_shouldNeverUnbox = newShouldNeverUnbox;
- return true;
-}
-
-bool VariableAccessData::predict(SpeculatedType prediction)
-{
- VariableAccessData* self = find();
- bool result = mergeSpeculation(self->m_prediction, prediction);
- if (result)
- mergeSpeculation(m_argumentAwarePrediction, m_prediction);
- return result;
-}
-
-bool VariableAccessData::mergeArgumentAwarePrediction(SpeculatedType prediction)
-{
- return mergeSpeculation(find()->m_argumentAwarePrediction, prediction);
-}
-
-bool VariableAccessData::shouldUseDoubleFormatAccordingToVote()
-{
- // We don't support this facility for arguments, yet.
- // FIXME: make this work for arguments.
- if (local().isArgument())
- return false;
-
- // If the variable is not a number prediction, then this doesn't
- // make any sense.
- if (!isFullNumberSpeculation(prediction())) {
- // FIXME: we may end up forcing a local in inlined argument position to be a double even
- // if it is sometimes not even numeric, since this never signals the fact that it doesn't
- // want doubles. https://bugs.webkit.org/show_bug.cgi?id=109511
- return false;
- }
-
- // If the variable is predicted to hold only doubles, then it's a
- // no-brainer: it should be formatted as a double.
- if (isDoubleSpeculation(prediction()))
- return true;
-
- // If the variable is known to be used as an integer, then be safe -
- // don't force it to be a double.
- if (flags() & NodeBytecodeUsesAsInt)
- return false;
-
- // If the variable has been voted to become a double, then make it a
- // double.
- if (voteRatio() >= Options::doubleVoteRatioForDoubleFormat())
- return true;
-
- return false;
-}
-
-bool VariableAccessData::tallyVotesForShouldUseDoubleFormat()
-{
- ASSERT(isRoot());
-
- if (local().isArgument() || shouldNeverUnbox()
- || (flags() & NodeBytecodeUsesAsArrayIndex))
- return DFG::mergeDoubleFormatState(m_doubleFormatState, NotUsingDoubleFormat);
-
- if (m_doubleFormatState == CantUseDoubleFormat)
- return false;
-
- bool newValueOfShouldUseDoubleFormat = shouldUseDoubleFormatAccordingToVote();
- if (!newValueOfShouldUseDoubleFormat) {
- // We monotonically convert to double. Hence, if the fixpoint leads us to conclude that we should
- // switch back to int, we instead ignore this and stick with double.
- return false;
- }
-
- if (m_doubleFormatState == UsingDoubleFormat)
- return false;
-
- return DFG::mergeDoubleFormatState(m_doubleFormatState, UsingDoubleFormat);
-}
-
-bool VariableAccessData::mergeDoubleFormatState(DoubleFormatState doubleFormatState)
-{
- return DFG::mergeDoubleFormatState(find()->m_doubleFormatState, doubleFormatState);
-}
-
-bool VariableAccessData::makePredictionForDoubleFormat()
-{
- ASSERT(isRoot());
-
- if (m_doubleFormatState != UsingDoubleFormat)
- return false;
-
- SpeculatedType type = m_prediction;
- if (type & ~SpecBytecodeNumber)
- type |= SpecDoublePureNaN;
- if (type & SpecMachineInt)
- type |= SpecInt52AsDouble;
- return checkAndSet(m_prediction, type);
-}
-
-bool VariableAccessData::couldRepresentInt52()
-{
- if (shouldNeverUnbox())
- return false;
-
- return couldRepresentInt52Impl();
-}
-
-bool VariableAccessData::couldRepresentInt52Impl()
-{
- // The hardware has to support it.
- if (!enableInt52())
- return false;
-
- // We punt for machine arguments.
- if (m_local.isArgument())
- return false;
-
- // The argument-aware prediction -- which merges all of an (inlined or machine)
- // argument's variable access datas' predictions -- must possibly be MachineInt.
- return !(argumentAwarePrediction() & ~SpecMachineInt);
-}
-
-FlushFormat VariableAccessData::flushFormat()
-{
- ASSERT(find() == this);
-
- if (!shouldUnboxIfPossible())
- return FlushedJSValue;
-
- if (shouldUseDoubleFormat())
- return FlushedDouble;
-
- SpeculatedType prediction = argumentAwarePrediction();
-
- // This guard is here to protect the call to couldRepresentInt52(), which will return
- // true for !prediction.
- if (!prediction)
- return FlushedJSValue;
-
- if (isInt32Speculation(prediction))
- return FlushedInt32;
-
- if (couldRepresentInt52Impl())
- return FlushedInt52;
-
- if (isCellSpeculation(prediction))
- return FlushedCell;
-
- if (isBooleanSpeculation(prediction))
- return FlushedBoolean;
-
- return FlushedJSValue;
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
index 0f817561c..5f83aeaf5 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
+++ b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +26,6 @@
#ifndef DFGVariableAccessData_h
#define DFGVariableAccessData_h
-#if ENABLE(DFG_JIT)
-
#include "DFGCommon.h"
#include "DFGDoubleFormatState.h"
#include "DFGFlushFormat.h"
@@ -36,6 +34,7 @@
#include "Operands.h"
#include "SpeculatedType.h"
#include "VirtualRegister.h"
+#include <wtf/Platform.h>
#include <wtf/UnionFind.h>
#include <wtf/Vector.h>
@@ -47,8 +46,39 @@ enum DoubleBallot { VoteValue, VoteDouble };
class VariableAccessData : public UnionFind<VariableAccessData> {
public:
- VariableAccessData();
- VariableAccessData(VirtualRegister local);
+ VariableAccessData()
+ : m_local(static_cast<VirtualRegister>(std::numeric_limits<int>::min()))
+ , m_prediction(SpecNone)
+ , m_argumentAwarePrediction(SpecNone)
+ , m_flags(0)
+ , m_isCaptured(false)
+ , m_shouldNeverUnbox(false)
+ , m_isArgumentsAlias(false)
+ , m_structureCheckHoistingFailed(false)
+ , m_checkArrayHoistingFailed(false)
+ , m_isProfitableToUnbox(false)
+ , m_isLoadedFrom(false)
+ , m_doubleFormatState(EmptyDoubleFormatState)
+ {
+ clearVotes();
+ }
+
+ VariableAccessData(VirtualRegister local, bool isCaptured)
+ : m_local(local)
+ , m_prediction(SpecNone)
+ , m_argumentAwarePrediction(SpecNone)
+ , m_flags(0)
+ , m_isCaptured(isCaptured)
+ , m_shouldNeverUnbox(isCaptured)
+ , m_isArgumentsAlias(false)
+ , m_structureCheckHoistingFailed(false)
+ , m_checkArrayHoistingFailed(false)
+ , m_isProfitableToUnbox(false)
+ , m_isLoadedFrom(false)
+ , m_doubleFormatState(EmptyDoubleFormatState)
+ {
+ clearVotes();
+ }
VirtualRegister local()
{
@@ -62,9 +92,20 @@ public:
return m_machineLocal;
}
+ bool mergeIsCaptured(bool isCaptured)
+ {
+ return checkAndSet(m_shouldNeverUnbox, m_shouldNeverUnbox | isCaptured)
+ | checkAndSet(m_isCaptured, m_isCaptured | isCaptured);
+ }
+
+ bool isCaptured()
+ {
+ return m_isCaptured;
+ }
+
bool mergeIsProfitableToUnbox(bool isProfitableToUnbox)
{
- return checkAndSet(m_isProfitableToUnbox, m_isProfitableToUnbox || isProfitableToUnbox);
+ return checkAndSet(m_isProfitableToUnbox, m_isProfitableToUnbox | isProfitableToUnbox);
}
bool isProfitableToUnbox()
@@ -72,13 +113,21 @@ public:
return m_isProfitableToUnbox;
}
- bool mergeShouldNeverUnbox(bool shouldNeverUnbox);
+ bool mergeShouldNeverUnbox(bool shouldNeverUnbox)
+ {
+ bool newShouldNeverUnbox = m_shouldNeverUnbox | shouldNeverUnbox;
+ if (newShouldNeverUnbox == m_shouldNeverUnbox)
+ return false;
+ m_shouldNeverUnbox = newShouldNeverUnbox;
+ return true;
+ }
// Returns true if it would be unsound to store the value in an unboxed fashion.
// If this returns false, it simply means that it is sound to unbox; it doesn't
// mean that we have actually done so.
bool shouldNeverUnbox()
{
+ ASSERT(!(m_isCaptured && !m_shouldNeverUnbox));
return m_shouldNeverUnbox;
}
@@ -92,12 +141,12 @@ public:
bool mergeStructureCheckHoistingFailed(bool failed)
{
- return checkAndSet(m_structureCheckHoistingFailed, m_structureCheckHoistingFailed || failed);
+ return checkAndSet(m_structureCheckHoistingFailed, m_structureCheckHoistingFailed | failed);
}
bool mergeCheckArrayHoistingFailed(bool failed)
{
- return checkAndSet(m_checkArrayHoistingFailed, m_checkArrayHoistingFailed || failed);
+ return checkAndSet(m_checkArrayHoistingFailed, m_checkArrayHoistingFailed | failed);
}
bool structureCheckHoistingFailed()
@@ -110,9 +159,19 @@ public:
return m_checkArrayHoistingFailed;
}
+ bool mergeIsArgumentsAlias(bool isArgumentsAlias)
+ {
+ return checkAndSet(m_isArgumentsAlias, m_isArgumentsAlias | isArgumentsAlias);
+ }
+
+ bool isArgumentsAlias()
+ {
+ return m_isArgumentsAlias;
+ }
+
bool mergeIsLoadedFrom(bool isLoadedFrom)
{
- return checkAndSet(m_isLoadedFrom, m_isLoadedFrom || isLoadedFrom);
+ return checkAndSet(m_isLoadedFrom, m_isLoadedFrom | isLoadedFrom);
}
void setIsLoadedFrom(bool isLoadedFrom)
@@ -125,7 +184,14 @@ public:
return m_isLoadedFrom;
}
- bool predict(SpeculatedType prediction);
+ bool predict(SpeculatedType prediction)
+ {
+ VariableAccessData* self = find();
+ bool result = mergeSpeculation(self->m_prediction, prediction);
+ if (result)
+ mergeSpeculation(m_argumentAwarePrediction, m_prediction);
+ return result;
+ }
SpeculatedType nonUnifiedPrediction()
{
@@ -142,7 +208,10 @@ public:
return find()->m_argumentAwarePrediction;
}
- bool mergeArgumentAwarePrediction(SpeculatedType prediction);
+ bool mergeArgumentAwarePrediction(SpeculatedType prediction)
+ {
+ return mergeSpeculation(find()->m_argumentAwarePrediction, prediction);
+ }
void clearVotes()
{
@@ -151,10 +220,10 @@ public:
m_votes[1] = 0;
}
- void vote(unsigned ballot, float weight = 1)
+ void vote(unsigned ballot)
{
ASSERT(ballot < 2);
- m_votes[ballot] += weight;
+ m_votes[ballot]++;
}
double voteRatio()
@@ -163,7 +232,39 @@ public:
return static_cast<double>(m_votes[1]) / m_votes[0];
}
- bool shouldUseDoubleFormatAccordingToVote();
+ bool shouldUseDoubleFormatAccordingToVote()
+ {
+ // We don't support this facility for arguments, yet.
+ // FIXME: make this work for arguments.
+ if (local().isArgument())
+ return false;
+
+ // If the variable is not a number prediction, then this doesn't
+ // make any sense.
+ if (!isFullNumberSpeculation(prediction())) {
+ // FIXME: we may end up forcing a local in inlined argument position to be a double even
+ // if it is sometimes not even numeric, since this never signals the fact that it doesn't
+ // want doubles. https://bugs.webkit.org/show_bug.cgi?id=109511
+ return false;
+ }
+
+ // If the variable is predicted to hold only doubles, then it's a
+ // no-brainer: it should be formatted as a double.
+ if (isDoubleSpeculation(prediction()))
+ return true;
+
+ // If the variable is known to be used as an integer, then be safe -
+ // don't force it to be a double.
+ if (flags() & NodeBytecodeUsesAsInt)
+ return false;
+
+ // If the variable has been voted to become a double, then make it a
+ // double.
+ if (voteRatio() >= Options::doubleVoteRatioForDoubleFormat())
+ return true;
+
+ return false;
+ }
DoubleFormatState doubleFormatState()
{
@@ -175,14 +276,47 @@ public:
ASSERT(isRoot());
bool doubleState = m_doubleFormatState == UsingDoubleFormat;
ASSERT(!(doubleState && shouldNeverUnbox()));
+ ASSERT(!(doubleState && isCaptured()));
return doubleState && isProfitableToUnbox();
}
- bool tallyVotesForShouldUseDoubleFormat();
+ bool tallyVotesForShouldUseDoubleFormat()
+ {
+ ASSERT(isRoot());
+
+ if (local().isArgument() || shouldNeverUnbox())
+ return DFG::mergeDoubleFormatState(m_doubleFormatState, NotUsingDoubleFormat);
+
+ if (m_doubleFormatState == CantUseDoubleFormat)
+ return false;
+
+ bool newValueOfShouldUseDoubleFormat = shouldUseDoubleFormatAccordingToVote();
+ if (!newValueOfShouldUseDoubleFormat) {
+ // We monotonically convert to double. Hence, if the fixpoint leads us to conclude that we should
+ // switch back to int, we instead ignore this and stick with double.
+ return false;
+ }
+
+ if (m_doubleFormatState == UsingDoubleFormat)
+ return false;
+
+ return DFG::mergeDoubleFormatState(m_doubleFormatState, UsingDoubleFormat);
+ }
- bool mergeDoubleFormatState(DoubleFormatState);
+ bool mergeDoubleFormatState(DoubleFormatState doubleFormatState)
+ {
+ return DFG::mergeDoubleFormatState(find()->m_doubleFormatState, doubleFormatState);
+ }
- bool makePredictionForDoubleFormat();
+ bool makePredictionForDoubleFormat()
+ {
+ ASSERT(isRoot());
+
+ if (m_doubleFormatState != UsingDoubleFormat)
+ return false;
+
+ return mergeSpeculation(m_prediction, SpecDouble);
+ }
NodeFlags flags() const { return m_flags; }
@@ -191,9 +325,34 @@ public:
return checkAndSet(m_flags, m_flags | newFlags);
}
- FlushFormat flushFormat();
-
- bool couldRepresentInt52();
+ FlushFormat flushFormat()
+ {
+ ASSERT(find() == this);
+
+ if (isArgumentsAlias())
+ return FlushedArguments;
+
+ if (!shouldUnboxIfPossible())
+ return FlushedJSValue;
+
+ if (shouldUseDoubleFormat())
+ return FlushedDouble;
+
+ SpeculatedType prediction = argumentAwarePrediction();
+ if (isInt32Speculation(prediction))
+ return FlushedInt32;
+
+ if (enableInt52() && !m_local.isArgument() && isMachineIntSpeculation(prediction))
+ return FlushedInt52;
+
+ if (isCellSpeculation(prediction))
+ return FlushedCell;
+
+ if (isBooleanSpeculation(prediction))
+ return FlushedBoolean;
+
+ return FlushedJSValue;
+ }
FlushedAt flushedAt()
{
@@ -201,8 +360,6 @@ public:
}
private:
- bool couldRepresentInt52Impl();
-
// This is slightly space-inefficient, since anything we're unified with
// will have the same operand and should have the same prediction. But
// putting them here simplifies the code, and we don't expect DFG space
@@ -214,7 +371,9 @@ private:
SpeculatedType m_argumentAwarePrediction;
NodeFlags m_flags;
+ bool m_isCaptured;
bool m_shouldNeverUnbox;
+ bool m_isArgumentsAlias;
bool m_structureCheckHoistingFailed;
bool m_checkArrayHoistingFailed;
bool m_isProfitableToUnbox;
@@ -226,6 +385,4 @@ private:
} } // namespace JSC::DFG
-#endif // ENABLE(DFG_JIT)
-
#endif // DFGVariableAccessData_h
diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp b/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp
index 00621737e..92050006f 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp
+++ b/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +30,6 @@
#include "DFGGraph.h"
#include "DFGVariableAccessData.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
@@ -62,7 +61,9 @@ void VariableAccessDataDump::dump(PrintStream& out) const
index /= 26;
}
- if (m_data->shouldNeverUnbox())
+ if (m_data->isCaptured())
+ out.print("*");
+ else if (m_data->shouldNeverUnbox())
out.print("!");
else if (!m_data->shouldUnboxIfPossible())
out.print("~");
diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h b/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h
index fd53fcd2c..1422d7fac 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h
+++ b/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef DFGVariableAccessDataDump_h
#define DFGVariableAccessDataDump_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include <wtf/PrintStream.h>
diff --git a/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp b/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp
index 28e437fd5..bb104ab54 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp
+++ b/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp
@@ -30,7 +30,6 @@
#include "FPRInfo.h"
#include "GPRInfo.h"
-#include "JSCInlines.h"
namespace JSC { namespace DFG {
@@ -46,9 +45,6 @@ void VariableEvent::dump(PrintStream& out) const
case BirthToSpill:
dumpSpillInfo("BirthToSpill", out);
break;
- case Birth:
- out.print("Birth(", id(), ")");
- break;
case Fill:
dumpFillInfo("Fill", out);
break;
@@ -59,11 +55,11 @@ void VariableEvent::dump(PrintStream& out) const
out.print("Death(", id(), ")");
break;
case MovHintEvent:
- out.print("MovHint(", id(), ", ", bytecodeRegister(), ")");
+ out.print("MovHint(", id(), ", r", bytecodeRegister(), ")");
break;
case SetLocalEvent:
out.print(
- "SetLocal(machine:", machineRegister(), " -> bytecode:", bytecodeRegister(),
+ "SetLocal(machine:r", machineRegister(), " -> bytecode:r", bytecodeRegister(),
", ", dataFormatToString(dataFormat()), ")");
break;
default:
@@ -88,7 +84,7 @@ void VariableEvent::dumpFillInfo(const char* name, PrintStream& out) const
void VariableEvent::dumpSpillInfo(const char* name, PrintStream& out) const
{
- out.print(name, "(", id(), ", ", spillRegister(), ", ", dataFormatToString(dataFormat()), ")");
+ out.print(name, "(", id(), ", r", spillRegister(), ", ", dataFormatToString(dataFormat()), ")");
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGVariableEvent.h b/Source/JavaScriptCore/dfg/DFGVariableEvent.h
index 5fa4bb686..24423ed2d 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableEvent.h
+++ b/Source/JavaScriptCore/dfg/DFGVariableEvent.h
@@ -26,6 +26,8 @@
#ifndef DFGVariableEvent_h
#define DFGVariableEvent_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
@@ -47,7 +49,6 @@ enum VariableEventKind {
// that we start to care about this node.
BirthToFill,
BirthToSpill,
- Birth,
// Events related to how a node is represented.
Fill,
@@ -134,14 +135,6 @@ public:
return event;
}
- static VariableEvent birth(MinifiedID id)
- {
- VariableEvent event;
- event.m_which.id = id.bits();
- event.m_kind = Birth;
- return event;
- }
-
static VariableEvent spill(VariableEventKind kind, MinifiedID id, VirtualRegister virtualRegister, DataFormat format)
{
ASSERT(kind == BirthToSpill || kind == Spill);
@@ -188,17 +181,17 @@ public:
MinifiedID id() const
{
- ASSERT(
- m_kind == BirthToFill || m_kind == Fill || m_kind == BirthToSpill || m_kind == Spill
- || m_kind == Death || m_kind == MovHintEvent || m_kind == Birth);
+ ASSERT(m_kind == BirthToFill || m_kind == Fill
+ || m_kind == BirthToSpill || m_kind == Spill
+ || m_kind == Death || m_kind == MovHintEvent);
return MinifiedID::fromBits(m_which.id);
}
DataFormat dataFormat() const
{
- ASSERT(
- m_kind == BirthToFill || m_kind == Fill || m_kind == BirthToSpill || m_kind == Spill
- || m_kind == SetLocalEvent);
+ ASSERT(m_kind == BirthToFill || m_kind == Fill
+ || m_kind == BirthToSpill || m_kind == Spill
+ || m_kind == SetLocalEvent);
return static_cast<DataFormat>(m_dataFormat);
}
diff --git a/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp b/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp
index e3f413c3b..98e08f6ff 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp
+++ b/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "DFGValueSource.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include <wtf/DataLog.h>
#include <wtf/HashMap.h>
@@ -48,14 +48,11 @@ namespace {
struct MinifiedGenerationInfo {
bool filled; // true -> in gpr/fpr/pair, false -> spilled
- bool alive;
VariableRepresentation u;
DataFormat format;
MinifiedGenerationInfo()
- : filled(false)
- , alive(false)
- , format(DataFormatNone)
+ : format(DataFormatNone)
{
}
@@ -65,19 +62,13 @@ struct MinifiedGenerationInfo {
case BirthToFill:
case Fill:
filled = true;
- alive = true;
break;
case BirthToSpill:
case Spill:
filled = false;
- alive = true;
break;
- case Birth:
- alive = true;
- return;
case Death:
format = DataFormatNone;
- alive = false;
return;
default:
return;
@@ -90,23 +81,25 @@ struct MinifiedGenerationInfo {
} // namespace
-bool VariableEventStream::tryToSetConstantRecovery(ValueRecovery& recovery, MinifiedNode* node) const
+bool VariableEventStream::tryToSetConstantRecovery(ValueRecovery& recovery, CodeBlock* codeBlock, MinifiedNode* node) const
{
if (!node)
return false;
- if (node->hasConstant()) {
- recovery = ValueRecovery::constant(node->constant());
+ if (node->hasConstantNumber()) {
+ recovery = ValueRecovery::constant(
+ codeBlock->constantRegister(
+ FirstConstantRegisterIndex + node->constantNumber()).get());
return true;
}
- if (node->op() == PhantomDirectArguments) {
- recovery = ValueRecovery::directArgumentsThatWereNotCreated(node->id());
+ if (node->hasWeakConstant()) {
+ recovery = ValueRecovery::constant(node->weakConstant());
return true;
}
- if (node->op() == PhantomClonedArguments) {
- recovery = ValueRecovery::outOfBandArgumentsThatWereNotCreated(node->id());
+ if (node->op() == PhantomArguments) {
+ recovery = ValueRecovery::argumentsThatWereNotCreated();
return true;
}
@@ -155,8 +148,7 @@ void VariableEventStream::reconstruct(
// nothing to do.
break;
case BirthToFill:
- case BirthToSpill:
- case Birth: {
+ case BirthToSpill: {
MinifiedGenerationInfo info;
info.update(event);
generationInfos.add(event.id(), info);
@@ -195,14 +187,14 @@ void VariableEventStream::reconstruct(
ASSERT(source.kind() == HaveNode);
MinifiedNode* node = graph.at(source.id());
+ if (tryToSetConstantRecovery(valueRecoveries[i], codeBlock, node))
+ continue;
+
MinifiedGenerationInfo info = generationInfos.get(source.id());
- if (!info.alive) {
+ if (info.format == DataFormatNone) {
valueRecoveries[i] = ValueRecovery::constant(jsUndefined());
continue;
}
-
- if (tryToSetConstantRecovery(valueRecoveries[i], node))
- continue;
ASSERT(info.format != DataFormatNone);
diff --git a/Source/JavaScriptCore/dfg/DFGVariableEventStream.h b/Source/JavaScriptCore/dfg/DFGVariableEventStream.h
index b0e4afac5..130fd6a99 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableEventStream.h
+++ b/Source/JavaScriptCore/dfg/DFGVariableEventStream.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,13 +26,14 @@
#ifndef DFGVariableEventStream_h
#define DFGVariableEventStream_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
#include "DFGMinifiedGraph.h"
#include "DFGVariableEvent.h"
#include "Operands.h"
-#include "ValueRecovery.h"
#include <wtf/Vector.h>
namespace JSC { namespace DFG {
@@ -49,7 +50,7 @@ public:
unsigned index, Operands<ValueRecovery>&) const;
private:
- bool tryToSetConstantRecovery(ValueRecovery&, MinifiedNode*) const;
+ bool tryToSetConstantRecovery(ValueRecovery&, CodeBlock*, MinifiedNode*) const;
void logEvent(const VariableEvent&);
};
diff --git a/Source/JavaScriptCore/dfg/DFGVariadicFunction.h b/Source/JavaScriptCore/dfg/DFGVariadicFunction.h
new file mode 100644
index 000000000..f5523af77
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGVariadicFunction.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGVariadicFunction_h
+#define DFGVariadicFunction_h
+
+#define DFG_COMMA ,
+
+// The signature of v is (templatePre, templatePost, typeParams, valueParams, valueArgs)
+//
+// You would use it like:
+// #define DEFINE_FUNCTION(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs)
+// templatePre typeParams templatePost void f(valueParams) { g(valueArgs); }
+// DFG_VARIADIC_TEMPLATE_FUNCTION(DEFINE_FUNCTION)
+// #undef DEFINE_FUNCTION
+//
+// Or if you wanted the defined function to take an additional template arg, you would do:
+// #define DEFINE_FUNCTION(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs)
+// template<typename T valueParamsComma typeParams> void f(T value valueParamsComma valueParams) { g(value, valueArgs); }
+// DFG_VARIADIC_TEMPLATE_FUNCTION(DEFINE_FUNCTION)
+// #undef DEFINE_FUNCTION
+
+#define DFG_VARIADIC_TEMPLATE_FUNCTION(v) \
+ v(, , , , , ) \
+ v(template<, >, typename _DFG_T1, DFG_COMMA, const _DFG_T1& _DFG_value1, _DFG_value1) \
+ v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2, _DFG_value1 DFG_COMMA _DFG_value2) \
+ v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3) \
+ v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4) \
+ v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4 DFG_COMMA typename _DFG_T5, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4 DFG_COMMA const _DFG_T5& _DFG_value5, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4 DFG_COMMA _DFG_value5) \
+ v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4 DFG_COMMA typename _DFG_T5 DFG_COMMA typename _DFG_T6, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4 DFG_COMMA const _DFG_T5& _DFG_value5 DFG_COMMA const _DFG_T6& _DFG_value6, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4 DFG_COMMA _DFG_value5 DFG_COMMA _DFG_value6) \
+ v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4 DFG_COMMA typename _DFG_T5 DFG_COMMA typename _DFG_T6 DFG_COMMA typename _DFG_T7, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4 DFG_COMMA const _DFG_T5& _DFG_value5 DFG_COMMA const _DFG_T6& _DFG_value6 DFG_COMMA const _DFG_T7& _DFG_value7, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4 DFG_COMMA _DFG_value5 DFG_COMMA _DFG_value6 DFG_COMMA _DFG_value7) \
+ v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4 DFG_COMMA typename _DFG_T5 DFG_COMMA typename _DFG_T6 DFG_COMMA typename _DFG_T7 DFG_COMMA typename _DFG_T8, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4 DFG_COMMA const _DFG_T5& _DFG_value5 DFG_COMMA const _DFG_T6& _DFG_value6 DFG_COMMA const _DFG_T7& _DFG_value7 DFG_COMMA _DFG_T8& _DFG_value8, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4 DFG_COMMA _DFG_value5 DFG_COMMA _DFG_value6 DFG_COMMA _DFG_value7 DFG_COMMA _DFG_value8)
+
+#endif // DFGVariadicFunction_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
index e5e133d43..71d526159 100644
--- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
@@ -30,7 +30,7 @@
#include "DFGGraph.h"
#include "DFGScoreBoard.h"
-#include "JSCInlines.h"
+#include "JSCellInlines.h"
#include "StackAlignment.h"
#include <wtf/StdLibExtras.h>
@@ -45,8 +45,6 @@ public:
bool run()
{
- DFG_ASSERT(m_graph, nullptr, m_graph.m_form == ThreadedCPS);
-
ScoreBoard scoreBoard(m_graph.m_nextMachineLocal);
scoreBoard.assertClear();
for (size_t blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
@@ -55,10 +53,6 @@ public:
continue;
if (!block->isReachable)
continue;
- if (!ASSERT_DISABLED) {
- // Force usage of highest-numbered virtual registers.
- scoreBoard.sortFree();
- }
for (size_t indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
Node* node = block->at(indexInBlock);
diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h
index 42128a0e8..5878ed13f 100644
--- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGVirtualRegisterAllocationPhase_h
#define DFGVirtualRegisterAllocationPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGPhase.h"
diff --git a/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp
index f924e4a2c..78df55009 100644
--- a/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,15 +32,7 @@
#include "DFGClobberize.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
-#include "JSCInlines.h"
-
-// FIXME: Remove this phase entirely by moving the addLazily() calls into either the backend or
-// into the phase that performs the optimization. Moving the calls into the backend makes the most
-// sense when the intermediate phases don't need to know that the watchpoint was set. Moving the
-// calls earlier usually only makes sense if the node's only purpose was to convey the need for
-// the watchpoint (like VarInjectionWatchpoint). But, it can also make sense if the fact that the
-// watchpoint was set enables other optimizations.
-// https://bugs.webkit.org/show_bug.cgi?id=144669
+#include "Operations.h"
namespace JSC { namespace DFG {
@@ -72,6 +64,8 @@ public:
private:
void handle()
{
+ DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, handleEdge);
+
switch (m_node->op()) {
case CompareEqConstant:
case IsUndefined:
@@ -87,27 +81,87 @@ private:
case LogicalNot:
case Branch:
- switch (m_node->child1().useKind()) {
- case ObjectOrOtherUse:
- case UntypedUse:
+ if (m_node->child1().useKind() == ObjectOrOtherUse)
handleMasqueradesAsUndefined();
- break;
- default:
- break;
+ break;
+
+ case GetByVal:
+ if (m_node->arrayMode().type() == Array::Double
+ && m_node->arrayMode().isSaneChain()) {
+ addLazily(globalObject()->arrayPrototype()->structure()->transitionWatchpointSet());
+ addLazily(globalObject()->objectPrototype()->structure()->transitionWatchpointSet());
}
+
+ if (m_node->arrayMode().type() == Array::String)
+ handleStringGetByVal();
+
+ if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node))
+ addLazily(view);
+ break;
+
+ case PutByVal:
+ if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node))
+ addLazily(view);
+ break;
+
+ case StringCharAt:
+ handleStringGetByVal();
break;
case NewArray:
case NewArrayWithSize:
case NewArrayBuffer:
- if (!globalObject()->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType()))
+ if (!globalObject()->isHavingABadTime() && !hasArrayStorage(m_node->indexingType()))
addLazily(globalObject()->havingABadTimeWatchpoint());
break;
+ case AllocationProfileWatchpoint:
+ addLazily(jsCast<JSFunction*>(m_node->function())->allocationProfileWatchpointSet());
+ break;
+
+ case StructureTransitionWatchpoint:
+ m_graph.watchpoints().addLazily(
+ m_node->codeOrigin,
+ m_node->child1()->op() == WeakJSConstant ? BadWeakConstantCacheWatchpoint : BadCacheWatchpoint,
+ m_node->structure()->transitionWatchpointSet());
+ break;
+
+ case VariableWatchpoint:
+ addLazily(m_node->variableWatchpointSet());
+ break;
+
case VarInjectionWatchpoint:
addLazily(globalObject()->varInjectionWatchpoint());
break;
+ case FunctionReentryWatchpoint:
+ addLazily(m_node->symbolTable()->m_functionEnteredOnce);
+ break;
+
+ case TypedArrayWatchpoint:
+ addLazily(m_node->typedArray());
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ void handleEdge(Node*, Edge edge)
+ {
+ switch (edge.useKind()) {
+ case StringObjectUse:
+ case StringOrStringObjectUse: {
+ Structure* stringObjectStructure = globalObject()->stringObjectStructure();
+ Structure* stringPrototypeStructure = stringObjectStructure->storedPrototype().asCell()->structure();
+ ASSERT(m_graph.watchpoints().isValidOrMixed(stringPrototypeStructure->transitionWatchpointSet()));
+
+ m_graph.watchpoints().addLazily(
+ m_node->codeOrigin, NotStringObject,
+ stringPrototypeStructure->transitionWatchpointSet());
+ break;
+ }
+
default:
break;
}
@@ -115,10 +169,20 @@ private:
void handleMasqueradesAsUndefined()
{
- if (m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->origin.semantic))
+ if (m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->codeOrigin))
addLazily(globalObject()->masqueradesAsUndefinedWatchpoint());
}
+ void handleStringGetByVal()
+ {
+ if (!m_node->arrayMode().isOutOfBounds())
+ return;
+ if (!globalObject()->stringPrototypeChainIsSane())
+ return;
+ addLazily(globalObject()->stringPrototype()->structure()->transitionWatchpointSet());
+ addLazily(globalObject()->objectPrototype()->structure()->transitionWatchpointSet());
+ }
+
void addLazily(WatchpointSet* set)
{
m_graph.watchpoints().addLazily(set);
@@ -127,10 +191,14 @@ private:
{
m_graph.watchpoints().addLazily(set);
}
+ void addLazily(JSArrayBufferView* view)
+ {
+ m_graph.watchpoints().addLazily(view);
+ }
JSGlobalObject* globalObject()
{
- return m_graph.globalObjectFor(m_node->origin.semantic);
+ return m_graph.globalObjectFor(m_node->codeOrigin);
}
Node* m_node;
diff --git a/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h b/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h
index fe8fef5d3..eb41522b1 100644
--- a/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h
@@ -26,6 +26,8 @@
#ifndef DFGWatchpointCollectionPhase_h
#define DFGWatchpointCollectionPhase_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGWorklist.cpp b/Source/JavaScriptCore/dfg/DFGWorklist.cpp
index f4bb709dc..3df19ac11 100644
--- a/Source/JavaScriptCore/dfg/DFGWorklist.cpp
+++ b/Source/JavaScriptCore/dfg/DFGWorklist.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,65 +31,46 @@
#include "CodeBlock.h"
#include "DeferGC.h"
#include "DFGLongLivedState.h"
-#include "DFGSafepoint.h"
-#include "JSCInlines.h"
#include <mutex>
namespace JSC { namespace DFG {
-Worklist::Worklist(CString worklistName)
- : m_threadName(toCString(worklistName, " Worker Thread"))
- , m_numberOfActiveThreads(0)
+Worklist::Worklist()
+ : m_numberOfActiveThreads(0)
{
}
Worklist::~Worklist()
{
{
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
for (unsigned i = m_threads.size(); i--;)
m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate.
- m_planEnqueued.notifyAll();
+ m_planEnqueued.broadcast();
}
for (unsigned i = m_threads.size(); i--;)
- waitForThreadCompletion(m_threads[i]->m_identifier);
+ waitForThreadCompletion(m_threads[i]);
ASSERT(!m_numberOfActiveThreads);
}
-void Worklist::finishCreation(unsigned numberOfThreads, int relativePriority)
+void Worklist::finishCreation(unsigned numberOfThreads)
{
RELEASE_ASSERT(numberOfThreads);
- for (unsigned i = numberOfThreads; i--;) {
- std::unique_ptr<ThreadData> data = std::make_unique<ThreadData>(this);
- data->m_identifier = createThread(threadFunction, data.get(), m_threadName.data());
- if (relativePriority)
- changeThreadPriority(data->m_identifier, relativePriority);
- m_threads.append(WTF::move(data));
- }
+ for (unsigned i = numberOfThreads; i--;)
+ m_threads.append(createThread(threadFunction, this, "JSC Compilation Thread"));
}
-Ref<Worklist> Worklist::create(CString worklistName, unsigned numberOfThreads, int relativePriority)
+PassRefPtr<Worklist> Worklist::create(unsigned numberOfThreads)
{
- Ref<Worklist> result = adoptRef(*new Worklist(worklistName));
- result->finishCreation(numberOfThreads, relativePriority);
+ RefPtr<Worklist> result = adoptRef(new Worklist());
+ result->finishCreation(numberOfThreads);
return result;
}
-bool Worklist::isActiveForVM(VM& vm) const
-{
- LockHolder locker(m_lock);
- PlanMap::const_iterator end = m_plans.end();
- for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
- if (&iter->value->vm == &vm)
- return true;
- }
- return false;
-}
-
void Worklist::enqueue(PassRefPtr<Plan> passedPlan)
{
RefPtr<Plan> plan = passedPlan;
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
if (Options::verboseCompilationQueue()) {
dump(locker, WTF::dataFile());
dataLog(": Enqueueing plan to optimize ", plan->key(), "\n");
@@ -97,16 +78,16 @@ void Worklist::enqueue(PassRefPtr<Plan> passedPlan)
ASSERT(m_plans.find(plan->key()) == m_plans.end());
m_plans.add(plan->key(), plan);
m_queue.append(plan);
- m_planEnqueued.notifyOne();
+ m_planEnqueued.signal();
}
Worklist::State Worklist::compilationState(CompilationKey key)
{
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
PlanMap::iterator iter = m_plans.find(key);
if (iter == m_plans.end())
return NotKnown;
- return iter->value->stage == Plan::Ready ? Compiled : Compiling;
+ return iter->value->isCompiled ? Compiled : Compiling;
}
void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
@@ -118,7 +99,7 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
// After we release this lock, we know that although other VMs may still
// be adding plans, our VM will not be.
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
if (Options::verboseCompilationQueue()) {
dump(locker, WTF::dataFile());
@@ -131,7 +112,7 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
if (&iter->value->vm != &vm)
continue;
- if (iter->value->stage != Plan::Ready) {
+ if (!iter->value->isCompiled) {
allAreCompiled = false;
break;
}
@@ -147,12 +128,12 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans)
{
DeferGC deferGC(vm.heap);
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
for (size_t i = 0; i < m_readyPlans.size(); ++i) {
RefPtr<Plan> plan = m_readyPlans[i];
if (&plan->vm != &vm)
continue;
- if (plan->stage != Plan::Ready)
+ if (!plan->isCompiled)
continue;
myReadyPlans.append(plan);
m_readyPlans[i--] = m_readyPlans.last();
@@ -183,7 +164,7 @@ Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requ
if (Options::verboseCompilationQueue())
dataLog(*this, ": Completing ", currentKey, "\n");
- RELEASE_ASSERT(plan->stage == Plan::Ready);
+ RELEASE_ASSERT(plan->isCompiled);
plan->finalizeAndNotifyCallback();
@@ -192,7 +173,7 @@ Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requ
}
if (!!requestedKey && resultingState == NotKnown) {
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
if (m_plans.contains(requestedKey))
resultingState = Compiling;
}
@@ -207,105 +188,19 @@ void Worklist::completeAllPlansForVM(VM& vm)
completeAllReadyPlansForVM(vm);
}
-void Worklist::suspendAllThreads()
-{
- m_suspensionLock.lock();
- for (unsigned i = m_threads.size(); i--;)
- m_threads[i]->m_rightToRun.lock();
-}
-
-void Worklist::resumeAllThreads()
-{
- for (unsigned i = m_threads.size(); i--;)
- m_threads[i]->m_rightToRun.unlock();
- m_suspensionLock.unlock();
-}
-
-void Worklist::visitWeakReferences(SlotVisitor& visitor, CodeBlockSet& codeBlocks)
-{
- VM* vm = visitor.heap()->vm();
- {
- LockHolder locker(m_lock);
- for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
- Plan* plan = iter->value.get();
- if (&plan->vm != vm)
- continue;
- iter->value->checkLivenessAndVisitChildren(visitor, codeBlocks);
- }
- }
- // This loop doesn't need locking because:
- // (1) no new threads can be added to m_threads. Hence, it is immutable and needs no locks.
- // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be
- // holding here because of a prior call to suspendAllThreads().
- for (unsigned i = m_threads.size(); i--;) {
- ThreadData* data = m_threads[i].get();
- Safepoint* safepoint = data->m_safepoint;
- if (safepoint && &safepoint->vm() == vm)
- safepoint->checkLivenessAndVisitChildren(visitor);
- }
-}
-
-void Worklist::removeDeadPlans(VM& vm)
-{
- {
- LockHolder locker(m_lock);
- HashSet<CompilationKey> deadPlanKeys;
- for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
- Plan* plan = iter->value.get();
- if (&plan->vm != &vm)
- continue;
- if (plan->isKnownToBeLiveDuringGC())
- continue;
- RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet.
- ASSERT(!deadPlanKeys.contains(plan->key()));
- deadPlanKeys.add(plan->key());
- }
- if (!deadPlanKeys.isEmpty()) {
- for (HashSet<CompilationKey>::iterator iter = deadPlanKeys.begin(); iter != deadPlanKeys.end(); ++iter)
- m_plans.take(*iter)->cancel();
- Deque<RefPtr<Plan>> newQueue;
- while (!m_queue.isEmpty()) {
- RefPtr<Plan> plan = m_queue.takeFirst();
- if (plan->stage != Plan::Cancelled)
- newQueue.append(plan);
- }
- m_queue.swap(newQueue);
- for (unsigned i = 0; i < m_readyPlans.size(); ++i) {
- if (m_readyPlans[i]->stage != Plan::Cancelled)
- continue;
- m_readyPlans[i] = m_readyPlans.last();
- m_readyPlans.removeLast();
- }
- }
- }
-
- // No locking needed for this part, see comment in visitWeakReferences().
- for (unsigned i = m_threads.size(); i--;) {
- ThreadData* data = m_threads[i].get();
- Safepoint* safepoint = data->m_safepoint;
- if (!safepoint)
- continue;
- if (&safepoint->vm() != &vm)
- continue;
- if (safepoint->isKnownToBeLiveDuringGC())
- continue;
- safepoint->cancel();
- }
-}
-
size_t Worklist::queueLength()
{
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
return m_queue.size();
}
void Worklist::dump(PrintStream& out) const
{
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
dump(locker, out);
}
-void Worklist::dump(const LockHolder&, PrintStream& out) const
+void Worklist::dump(const MutexLocker&, PrintStream& out) const
{
out.print(
"Worklist(", RawPointer(this), ")[Queue Length = ", m_queue.size(),
@@ -313,7 +208,7 @@ void Worklist::dump(const LockHolder&, PrintStream& out) const
", Num Active Threads = ", m_numberOfActiveThreads, "/", m_threads.size(), "]");
}
-void Worklist::runThread(ThreadData* data)
+void Worklist::runThread()
{
CompilationScope compilationScope;
@@ -325,10 +220,9 @@ void Worklist::runThread(ThreadData* data)
for (;;) {
RefPtr<Plan> plan;
{
- LockHolder locker(m_lock);
+ MutexLocker locker(m_lock);
while (m_queue.isEmpty())
m_planEnqueued.wait(m_lock);
-
plan = m_queue.takeFirst();
if (plan)
m_numberOfActiveThreads++;
@@ -340,45 +234,13 @@ void Worklist::runThread(ThreadData* data)
return;
}
- {
- LockHolder locker(data->m_rightToRun);
- {
- LockHolder locker(m_lock);
- if (plan->stage == Plan::Cancelled) {
- m_numberOfActiveThreads--;
- continue;
- }
- plan->notifyCompiling();
- }
+ if (Options::verboseCompilationQueue())
+ dataLog(*this, ": Compiling ", plan->key(), " asynchronously\n");
- if (Options::verboseCompilationQueue())
- dataLog(*this, ": Compiling ", plan->key(), " asynchronously\n");
+ plan->compileInThread(longLivedState);
- RELEASE_ASSERT(!plan->vm.heap.isCollecting());
- plan->compileInThread(longLivedState, data);
- RELEASE_ASSERT(!plan->vm.heap.isCollecting());
-
- {
- LockHolder locker(m_lock);
- if (plan->stage == Plan::Cancelled) {
- m_numberOfActiveThreads--;
- continue;
- }
- plan->notifyCompiled();
- }
- RELEASE_ASSERT(!plan->vm.heap.isCollecting());
- }
-
{
- LockHolder locker(m_lock);
-
- // We could have been cancelled between releasing rightToRun and acquiring m_lock.
- // This would mean that we might be in the middle of GC right now.
- if (plan->stage == Plan::Cancelled) {
- m_numberOfActiveThreads--;
- continue;
- }
-
+ MutexLocker locker(m_lock);
plan->notifyReady();
if (Options::verboseCompilationQueue()) {
@@ -388,7 +250,7 @@ void Worklist::runThread(ThreadData* data)
m_readyPlans.append(plan);
- m_planCompiled.notifyAll();
+ m_planCompiled.broadcast();
m_numberOfActiveThreads--;
}
}
@@ -396,56 +258,26 @@ void Worklist::runThread(ThreadData* data)
void Worklist::threadFunction(void* argument)
{
- ThreadData* data = static_cast<ThreadData*>(argument);
- data->m_worklist->runThread(data);
+ static_cast<Worklist*>(argument)->runThread();
}
-static Worklist* theGlobalDFGWorklist;
+static Worklist* theGlobalWorklist;
-Worklist* ensureGlobalDFGWorklist()
+Worklist* globalWorklist()
{
static std::once_flag initializeGlobalWorklistOnceFlag;
std::call_once(initializeGlobalWorklistOnceFlag, [] {
- theGlobalDFGWorklist = &Worklist::create("DFG Worklist", Options::numberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef();
- });
- return theGlobalDFGWorklist;
-}
+ unsigned numberOfThreads;
-Worklist* existingGlobalDFGWorklistOrNull()
-{
- return theGlobalDFGWorklist;
-}
+ if (Options::useExperimentalFTL())
+ numberOfThreads = 1; // We don't yet use LLVM in a thread-safe way.
+ else
+ numberOfThreads = Options::numberOfCompilerThreads();
-static Worklist* theGlobalFTLWorklist;
-
-Worklist* ensureGlobalFTLWorklist()
-{
- static std::once_flag initializeGlobalWorklistOnceFlag;
- std::call_once(initializeGlobalWorklistOnceFlag, [] {
- theGlobalFTLWorklist = &Worklist::create("FTL Worklist", Options::numberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef();
+ theGlobalWorklist = Worklist::create(numberOfThreads).leakRef();
});
- return theGlobalFTLWorklist;
-}
-
-Worklist* existingGlobalFTLWorklistOrNull()
-{
- return theGlobalFTLWorklist;
-}
-Worklist* ensureGlobalWorklistFor(CompilationMode mode)
-{
- switch (mode) {
- case InvalidCompilationMode:
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- case DFGMode:
- return ensureGlobalDFGWorklist();
- case FTLMode:
- case FTLForOSREntryMode:
- return ensureGlobalFTLWorklist();
- }
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
+ return theGlobalWorklist;
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGWorklist.h b/Source/JavaScriptCore/dfg/DFGWorklist.h
index 415b26e00..d3419f0a9 100644
--- a/Source/JavaScriptCore/dfg/DFGWorklist.h
+++ b/Source/JavaScriptCore/dfg/DFGWorklist.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,22 +26,18 @@
#ifndef DFGWorklist_h
#define DFGWorklist_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DFGPlan.h"
-#include "DFGThreadData.h"
-#include <wtf/Condition.h>
#include <wtf/Deque.h>
#include <wtf/HashMap.h>
-#include <wtf/Lock.h>
#include <wtf/Noncopyable.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/ThreadingPrimitives.h>
-namespace JSC {
-
-class CodeBlockSet;
-class SlotVisitor;
-
-namespace DFG {
+namespace JSC { namespace DFG {
class Worklist : public RefCounted<Worklist> {
public:
@@ -49,7 +45,7 @@ public:
~Worklist();
- static Ref<Worklist> create(CString worklistName, unsigned numberOfThreads, int relativePriority = 0);
+ static PassRefPtr<Worklist> create(unsigned numberOfThreads);
void enqueue(PassRefPtr<Plan>);
@@ -65,33 +61,21 @@ public:
State compilationState(CompilationKey);
size_t queueLength();
-
- void suspendAllThreads();
- void resumeAllThreads();
-
- bool isActiveForVM(VM&) const;
-
- // Only called on the main thread after suspending all threads.
- void visitWeakReferences(SlotVisitor&, CodeBlockSet&);
- void removeDeadPlans(VM&);
-
void dump(PrintStream&) const;
private:
- Worklist(CString worklistName);
- void finishCreation(unsigned numberOfThreads, int);
+ Worklist();
+ void finishCreation(unsigned numberOfThreads);
- void runThread(ThreadData*);
+ void runThread();
static void threadFunction(void* argument);
void removeAllReadyPlansForVM(VM&, Vector<RefPtr<Plan>, 8>&);
- void dump(const LockHolder&, PrintStream&) const;
-
- CString m_threadName;
-
+ void dump(const MutexLocker&, PrintStream&) const;
+
// Used to inform the thread about what work there is left to do.
- Deque<RefPtr<Plan>> m_queue;
+ Deque<RefPtr<Plan>, 16> m_queue;
// Used to answer questions about the current state of a code block. This
// is particularly great for the cti_optimize OSR slow path, which wants
@@ -103,41 +87,18 @@ private:
// Used to quickly find which plans have been compiled and are ready to
// be completed.
Vector<RefPtr<Plan>, 16> m_readyPlans;
-
- Lock m_suspensionLock;
-
- mutable Lock m_lock;
- Condition m_planEnqueued;
- Condition m_planCompiled;
- Vector<std::unique_ptr<ThreadData>> m_threads;
+ mutable Mutex m_lock;
+ ThreadCondition m_planEnqueued;
+ ThreadCondition m_planCompiled;
+ Vector<ThreadIdentifier> m_threads;
unsigned m_numberOfActiveThreads;
};
-// For DFGMode compilations.
-Worklist* ensureGlobalDFGWorklist();
-Worklist* existingGlobalDFGWorklistOrNull();
-
-// For FTLMode and FTLForOSREntryMode compilations.
-Worklist* ensureGlobalFTLWorklist();
-Worklist* existingGlobalFTLWorklistOrNull();
-
-Worklist* ensureGlobalWorklistFor(CompilationMode);
-
-// Simplify doing things for all worklists.
-inline unsigned numberOfWorklists() { return 2; }
-inline Worklist* worklistForIndexOrNull(unsigned index)
-{
- switch (index) {
- case 0:
- return existingGlobalDFGWorklistOrNull();
- case 1:
- return existingGlobalFTLWorklistOrNull();
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
-}
+// For now we use a single global worklist. It's not clear that this
+// is the right thing to do, but it is what we do, for now. This function
+// will lazily create one when it's needed.
+Worklist* globalWorklist();
} } // namespace JSC::DFG